1 /*
2 *
3 * hfcpci.c low level driver for CCD's hfc-pci based cards
4 *
5 * Author Werner Cornelius (werner@isdn4linux.de)
6 * based on existing driver for CCD hfc ISA cards
7 * type approval valid for HFC-S PCI A based card
8 *
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 * Module options:
27 *
28 * debug:
29  *		NOTE: only one debug value must be given for all cards
30 * See hfc_pci.h for debug flags.
31 *
32 * poll:
33 * NOTE: only one poll value must be given for all cards
34 * Give the number of samples for each fifo process.
35 * By default 128 is used. Decrease to reduce delay, increase to
36 * reduce cpu load. If unsure, don't mess with it!
37 * A value of 128 will use controller's interrupt. Other values will
38 * use kernel timer, because the controller will not allow lower values
39 * than 128.
40 * Also note that the value depends on the kernel timer frequency.
41 * If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
42 * If the kernel uses 100 Hz, steps of 80 samples are possible.
43 * If the kernel uses 300 Hz, steps of about 26 samples are possible.
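 *	(In general the step size is 8000 / HZ samples, because ISDN
 *	channels are sampled at 8000 Hz.)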
44 *
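 * Example invocation (assuming the module is built as hfcpci.ko):
 *	modprobe hfcpci poll=64 debug=0
 *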
45 */
46
47 #include <linux/interrupt.h>
48 #include <linux/module.h>
49 #include <linux/pci.h>
50 #include <linux/delay.h>
51 #include <linux/mISDNhw.h>
52 #include <linux/slab.h>
53
54 #include "hfc_pci.h"
55
56 static const char *hfcpci_revision = "2.0";
57
58 static int HFC_cnt;
59 static uint debug;
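/*
 * poll/tics, hfc_tl and hfc_jiffies drive the kernel-timer based fifo
 * polling described in the module option comment above (used when poll
 * is set to a value other than 128).
 */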
60 static uint poll, tics;
61 static struct timer_list hfc_tl;
62 static unsigned long hfc_jiffies;
63
64 MODULE_AUTHOR("Karsten Keil");
65 MODULE_LICENSE("GPL");
66 module_param(debug, uint, S_IRUGO | S_IWUSR);
67 module_param(poll, uint, S_IRUGO | S_IWUSR);
68
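/* known card variants, named after their PCI (sub)device IDs */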
69 enum {
70 HFC_CCD_2BD0,
71 HFC_CCD_B000,
72 HFC_CCD_B006,
73 HFC_CCD_B007,
74 HFC_CCD_B008,
75 HFC_CCD_B009,
76 HFC_CCD_B00A,
77 HFC_CCD_B00B,
78 HFC_CCD_B00C,
79 HFC_CCD_B100,
80 HFC_CCD_B700,
81 HFC_CCD_B701,
82 HFC_ASUS_0675,
83 HFC_BERKOM_A1T,
84 HFC_BERKOM_TCONCEPT,
85 HFC_ANIGMA_MC145575,
86 HFC_ZOLTRIX_2BD0,
87 HFC_DIGI_DF_M_IOM2_E,
88 HFC_DIGI_DF_M_E,
89 HFC_DIGI_DF_M_IOM2_A,
90 HFC_DIGI_DF_M_A,
91 HFC_ABOCOM_2BD1,
92 HFC_SITECOM_DC105V2,
93 };
94
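/* shadow copies of the chip registers plus fifo and timer state */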
95 struct hfcPCI_hw {
96 unsigned char cirm;
97 unsigned char ctmt;
98 unsigned char clkdel;
99 unsigned char states;
100 unsigned char conn;
101 unsigned char mst_m;
102 unsigned char int_m1;
103 unsigned char int_m2;
104 unsigned char sctrl;
105 unsigned char sctrl_r;
106 unsigned char sctrl_e;
107 unsigned char trm;
108 unsigned char fifo_en;
109 unsigned char bswapped;
110 unsigned char protocol;
111 int nt_timer;
112 unsigned char __iomem *pci_io; /* start of PCI IO memory */
113 dma_addr_t dmahandle;
114 void *fifos; /* FIFO memory */
115 int last_bfifo_cnt[2];
116 /* marker saving last b-fifo frame count */
117 struct timer_list timer;
118 };
119
120 #define HFC_CFG_MASTER 1
121 #define HFC_CFG_SLAVE 2
122 #define HFC_CFG_PCM 3
123 #define HFC_CFG_2HFC 4
124 #define HFC_CFG_SLAVEHFC 5
125 #define HFC_CFG_NEG_F0 6
126 #define HFC_CFG_SW_DD_DU 7
127
128 #define FLG_HFC_TIMER_T1 16
129 #define FLG_HFC_TIMER_T3 17
130
131 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
132 #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
133 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
134 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
135
136
137 struct hfc_pci {
138 u_char subtype;
139 u_char chanlimit;
140 u_char initdone;
141 u_long cfg;
142 u_int irq;
143 u_int irqcnt;
144 struct pci_dev *pdev;
145 struct hfcPCI_hw hw;
146 spinlock_t lock; /* card lock */
147 struct dchannel dch;
148 struct bchannel bch[2];
149 };
150
151 /* Interface functions */
152 static void
153 enable_hwirq(struct hfc_pci *hc)
154 {
155 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
156 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
157 }
158
159 static void
160 disable_hwirq(struct hfc_pci *hc)
161 {
162 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
163 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
164 }
165
166 /*
167 * free hardware resources used by driver
168 */
169 static void
170 release_io_hfcpci(struct hfc_pci *hc)
171 {
172 /* disable memory mapped ports + busmaster */
173 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
174 del_timer(&hc->hw.timer);
175 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
176 iounmap(hc->hw.pci_io);
177 }
178
179 /*
180 * set mode (NT or TE)
181 */
182 static void
183 hfcpci_setmode(struct hfc_pci *hc)
184 {
185 if (hc->hw.protocol == ISDN_P_NT_S0) {
186 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
187 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
188 hc->hw.states = 1; /* G1 */
189 } else {
190 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
191 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
192 hc->hw.states = 2; /* F2 */
193 }
194 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
195 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
196 udelay(10);
197 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
198 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
199 }
200
201 /*
202 * function called to reset the HFC PCI chip. A complete software reset of chip
203 * and fifos is done.
204 */
205 static void
206 reset_hfcpci(struct hfc_pci *hc)
207 {
208 u_char val;
209 int cnt = 0;
210
211 printk(KERN_DEBUG "reset_hfcpci: entered\n");
212 val = Read_hfc(hc, HFCPCI_CHIP_ID);
213 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
214 /* enable memory mapped ports, disable busmaster */
215 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
216 disable_hwirq(hc);
217 /* enable memory ports + busmaster */
218 pci_write_config_word(hc->pdev, PCI_COMMAND,
219 PCI_ENA_MEMIO + PCI_ENA_MASTER);
220 val = Read_hfc(hc, HFCPCI_STATUS);
221 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
222 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
223 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
224 set_current_state(TASK_UNINTERRUPTIBLE);
225 mdelay(10); /* Timeout 10ms */
226 hc->hw.cirm = 0; /* Reset Off */
227 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
228 val = Read_hfc(hc, HFCPCI_STATUS);
229 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
230 while (cnt < 50000) { /* max 50000 us */
231 udelay(5);
232 cnt += 5;
233 val = Read_hfc(hc, HFCPCI_STATUS);
234 if (!(val & 2))
235 break;
236 }
237 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
238
239 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
240
241 hc->hw.bswapped = 0; /* no exchange */
242 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
243 hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
244 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
245 hc->hw.sctrl_r = 0;
246 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
247 hc->hw.mst_m = 0;
248 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
249 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
250 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
251 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
252 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
253 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
254 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
255 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
256
257 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
258 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
259 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
260
261 /* Clear already pending ints */
262 val = Read_hfc(hc, HFCPCI_INT_S1);
263
264 /* set NT/TE mode */
265 hfcpci_setmode(hc);
266
267 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
268 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
269
270 /*
271 * Init GCI/IOM2 in master mode
272 * Slots 0 and 1 are set for B-chan 1 and 2
273 * D- and monitor/CI channel are not enabled
274 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
275 * STIO2 is used as data input, B1+B2 from IOM->ST
276 * ST B-channel send disabled -> continuous 1s
277 * The IOM slots are always enabled
278 */
279 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
280 /* set data flow directions: connect B1,B2: HFC to/from PCM */
281 hc->hw.conn = 0x09;
282 } else {
283 hc->hw.conn = 0x36; /* set data flow directions */
284 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
285 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
286 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
287 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
288 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
289 } else {
290 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
291 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
292 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
293 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
294 }
295 }
296 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
297 val = Read_hfc(hc, HFCPCI_INT_S2);
298 }
299
300 /*
301 * Timer function called when kernel timer expires
302 */
303 static void
304 hfcpci_Timer(struct timer_list *t)
305 {
306 struct hfc_pci *hc = from_timer(hc, t, hw.timer);
307 hc->hw.timer.expires = jiffies + 75;
308 /* WD RESET */
309 /*
310 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
311 * add_timer(&hc->hw.timer);
312 */
313 }
314
315
316 /*
317  * select the b-channel entry that matches 'channel' and is active
318 */
319 static struct bchannel *
320 Sel_BCS(struct hfc_pci *hc, int channel)
321 {
322 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
323 (hc->bch[0].nr & channel))
324 return &hc->bch[0];
325 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
326 (hc->bch[1].nr & channel))
327 return &hc->bch[1];
328 else
329 return NULL;
330 }
331
332 /*
333 * clear the desired B-channel rx fifo
334 */
335 static void
336 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
337 {
338 u_char fifo_state;
339 struct bzfifo *bzr;
340
341 if (fifo) {
342 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
343 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
344 } else {
345 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
346 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
347 }
348 if (fifo_state)
349 hc->hw.fifo_en ^= fifo_state;
350 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
351 hc->hw.last_bfifo_cnt[fifo] = 0;
352 bzr->f1 = MAX_B_FRAMES;
353 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
354 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
355 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
356 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
357 if (fifo_state)
358 hc->hw.fifo_en |= fifo_state;
359 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
360 }
361
362 /*
363 * clear the desired B-channel tx fifo
364 */
365 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
366 {
367 u_char fifo_state;
368 struct bzfifo *bzt;
369
370 if (fifo) {
371 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
372 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
373 } else {
374 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
375 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
376 }
377 if (fifo_state)
378 hc->hw.fifo_en ^= fifo_state;
379 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
380 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
381 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
382 "z1(%x) z2(%x) state(%x)\n",
383 fifo, bzt->f1, bzt->f2,
384 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
385 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
386 fifo_state);
387 bzt->f2 = MAX_B_FRAMES;
388 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
389 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
390 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
391 if (fifo_state)
392 hc->hw.fifo_en |= fifo_state;
393 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
394 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
395 printk(KERN_DEBUG
396 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
397 fifo, bzt->f1, bzt->f2,
398 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
399 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
400 }
401
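/*
 * Note on the B-channel FIFO handling below: each FIFO is a ring buffer
 * described by the frame counters F1 (advanced by the chip on receive)
 * and F2 (advanced by the driver) plus per-frame byte pointers Z1/Z2.
 * A frame is pending while F1 != F2; its length is Z1 - Z2 + 1 modulo
 * the FIFO size and includes two CRC bytes and a STAT byte, which are
 * stripped before the data is passed up.
 */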
402 /*
403 * read a complete B-frame out of the buffer
404 */
405 static void
406 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
407 u_char *bdata, int count)
408 {
409 u_char *ptr, *ptr1, new_f2;
410 int maxlen, new_z2;
411 struct zt *zp;
412
413 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
414 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
415 zp = &bz->za[bz->f2]; /* point to Z-Regs */
416 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
417 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
418 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
419 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
420 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
421 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
422 if (bch->debug & DEBUG_HW)
423 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
424 "invalid length %d or crc\n", count);
425 #ifdef ERROR_STATISTIC
426 bch->err_inv++;
427 #endif
428 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
429 bz->f2 = new_f2; /* next buffer */
430 } else {
431 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
432 if (!bch->rx_skb) {
433 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
434 return;
435 }
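		/* strip the 2 CRC bytes and the STAT byte from the payload */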
436 count -= 3;
437 ptr = skb_put(bch->rx_skb, count);
438
439 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
440 maxlen = count; /* complete transfer */
441 else
442 maxlen = B_FIFO_SIZE + B_SUB_VAL -
443 le16_to_cpu(zp->z2); /* maximum */
444
445 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
446 /* start of data */
447 memcpy(ptr, ptr1, maxlen); /* copy data */
448 count -= maxlen;
449
450 if (count) { /* rest remaining */
451 ptr += maxlen;
452 ptr1 = bdata; /* start of buffer */
453 memcpy(ptr, ptr1, count); /* rest */
454 }
455 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
456 bz->f2 = new_f2; /* next buffer */
457 recv_Bchannel(bch, MISDN_ID_ANY, false);
458 }
459 }
460
461 /*
462 * D-channel receive procedure
463 */
464 static int
465 receive_dmsg(struct hfc_pci *hc)
466 {
467 struct dchannel *dch = &hc->dch;
468 int maxlen;
469 int rcnt, total;
470 int count = 5;
471 u_char *ptr, *ptr1;
472 struct dfifo *df;
473 struct zt *zp;
474
475 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
476 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
477 zp = &df->za[df->f2 & D_FREG_MASK];
478 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
479 if (rcnt < 0)
480 rcnt += D_FIFO_SIZE;
481 rcnt++;
482 if (dch->debug & DEBUG_HW_DCHANNEL)
483 printk(KERN_DEBUG
484 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
485 df->f1, df->f2,
486 le16_to_cpu(zp->z1),
487 le16_to_cpu(zp->z2),
488 rcnt);
489
490 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
491 (df->data[le16_to_cpu(zp->z1)])) {
492 if (dch->debug & DEBUG_HW)
493 printk(KERN_DEBUG
494 "empty_fifo hfcpci packet inv. len "
495 "%d or crc %d\n",
496 rcnt,
497 df->data[le16_to_cpu(zp->z1)]);
498 #ifdef ERROR_STATISTIC
499 cs->err_rx++;
500 #endif
501 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
502 (MAX_D_FRAMES + 1); /* next buffer */
503 df->za[df->f2 & D_FREG_MASK].z2 =
504 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
505 (D_FIFO_SIZE - 1));
506 } else {
507 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
508 if (!dch->rx_skb) {
509 printk(KERN_WARNING
510 "HFC-PCI: D receive out of memory\n");
511 break;
512 }
513 total = rcnt;
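			/* strip the 2 CRC bytes and the STAT byte */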
514 rcnt -= 3;
515 ptr = skb_put(dch->rx_skb, rcnt);
516
517 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
518 maxlen = rcnt; /* complete transfer */
519 else
520 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
521 /* maximum */
522
523 ptr1 = df->data + le16_to_cpu(zp->z2);
524 /* start of data */
525 memcpy(ptr, ptr1, maxlen); /* copy data */
526 rcnt -= maxlen;
527
528 if (rcnt) { /* rest remaining */
529 ptr += maxlen;
530 ptr1 = df->data; /* start of buffer */
531 memcpy(ptr, ptr1, rcnt); /* rest */
532 }
533 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
534 (MAX_D_FRAMES + 1); /* next buffer */
535 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
536 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
537 recv_Dchannel(dch);
538 }
539 }
540 return 1;
541 }
542
543 /*
544  * check for transparent receive data and read at most one 'poll' worth if available
545 */
546 static void
547 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
548 struct bzfifo *txbz, u_char *bdata)
549 {
550 __le16 *z1r, *z2r, *z1t, *z2t;
551 int new_z2, fcnt_rx, fcnt_tx, maxlen;
552 u_char *ptr, *ptr1;
553
554 z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
555 z2r = z1r + 1;
556 z1t = &txbz->za[MAX_B_FRAMES].z1;
557 z2t = z1t + 1;
558
559 fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
560 if (!fcnt_rx)
561 return; /* no data avail */
562
563 if (fcnt_rx <= 0)
564 fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
565 new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
566 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
567 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
568
569 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
570 if (fcnt_tx <= 0)
571 fcnt_tx += B_FIFO_SIZE;
572 /* fcnt_tx contains available bytes in tx-fifo */
573 fcnt_tx = B_FIFO_SIZE - fcnt_tx;
574 /* remaining bytes to send (bytes in tx-fifo) */
575
576 if (test_bit(FLG_RX_OFF, &bch->Flags)) {
577 bch->dropcnt += fcnt_rx;
578 *z2r = cpu_to_le16(new_z2);
579 return;
580 }
581 maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
582 if (maxlen < 0) {
583 pr_warning("B%d: No bufferspace for %d bytes\n",
584 bch->nr, fcnt_rx);
585 } else {
586 ptr = skb_put(bch->rx_skb, fcnt_rx);
587 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
588 maxlen = fcnt_rx; /* complete transfer */
589 else
590 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
591 /* maximum */
592
593 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
594 /* start of data */
595 memcpy(ptr, ptr1, maxlen); /* copy data */
596 fcnt_rx -= maxlen;
597
598 if (fcnt_rx) { /* rest remaining */
599 ptr += maxlen;
600 ptr1 = bdata; /* start of buffer */
601 memcpy(ptr, ptr1, fcnt_rx); /* rest */
602 }
603 recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
604 }
605 *z2r = cpu_to_le16(new_z2); /* new position */
606 }
607
608 /*
609 * B-channel main receive routine
610 */
611 static void
612 main_rec_hfcpci(struct bchannel *bch)
613 {
614 struct hfc_pci *hc = bch->hw;
615 int rcnt, real_fifo;
616 int receive = 0, count = 5;
617 struct bzfifo *txbz, *rxbz;
618 u_char *bdata;
619 struct zt *zp;
620
621 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
622 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
623 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
624 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
625 real_fifo = 1;
626 } else {
627 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
628 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
629 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
630 real_fifo = 0;
631 }
632 Begin:
633 count--;
634 if (rxbz->f1 != rxbz->f2) {
635 if (bch->debug & DEBUG_HW_BCHANNEL)
636 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
637 bch->nr, rxbz->f1, rxbz->f2);
638 zp = &rxbz->za[rxbz->f2];
639
640 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
641 if (rcnt < 0)
642 rcnt += B_FIFO_SIZE;
643 rcnt++;
644 if (bch->debug & DEBUG_HW_BCHANNEL)
645 printk(KERN_DEBUG
646 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
647 bch->nr, le16_to_cpu(zp->z1),
648 le16_to_cpu(zp->z2), rcnt);
649 hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
650 rcnt = rxbz->f1 - rxbz->f2;
651 if (rcnt < 0)
652 rcnt += MAX_B_FRAMES + 1;
653 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
654 rcnt = 0;
655 hfcpci_clear_fifo_rx(hc, real_fifo);
656 }
657 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
658 if (rcnt > 1)
659 receive = 1;
660 else
661 receive = 0;
662 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
663 hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
664 return;
665 } else
666 receive = 0;
667 if (count && receive)
668 goto Begin;
669
670 }
671
672 /*
673 * D-channel send routine
674 */
675 static void
676 hfcpci_fill_dfifo(struct hfc_pci *hc)
677 {
678 struct dchannel *dch = &hc->dch;
679 int fcnt;
680 int count, new_z1, maxlen;
681 struct dfifo *df;
682 u_char *src, *dst, new_f1;
683
684 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
685 printk(KERN_DEBUG "%s\n", __func__);
686
687 if (!dch->tx_skb)
688 return;
689 count = dch->tx_skb->len - dch->tx_idx;
690 if (count <= 0)
691 return;
692 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
693
694 if (dch->debug & DEBUG_HW_DFIFO)
695 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
696 df->f1, df->f2,
697 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
698 fcnt = df->f1 - df->f2; /* frame count actually buffered */
699 if (fcnt < 0)
700 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
701 if (fcnt > (MAX_D_FRAMES - 1)) {
702 if (dch->debug & DEBUG_HW_DCHANNEL)
703 printk(KERN_DEBUG
704 "hfcpci_fill_Dfifo more as 14 frames\n");
705 #ifdef ERROR_STATISTIC
706 cs->err_tx++;
707 #endif
708 return;
709 }
710 /* now determine free bytes in FIFO buffer */
711 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
712 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
713 if (maxlen <= 0)
714 maxlen += D_FIFO_SIZE; /* count now contains available bytes */
715
716 if (dch->debug & DEBUG_HW_DCHANNEL)
717 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
718 count, maxlen);
719 if (count > maxlen) {
720 if (dch->debug & DEBUG_HW_DCHANNEL)
721 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
722 return;
723 }
724 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
725 (D_FIFO_SIZE - 1);
726 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
727 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
728 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
729 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
730 /* end fifo */
731 if (maxlen > count)
732 maxlen = count; /* limit size */
733 memcpy(dst, src, maxlen); /* first copy */
734
735 count -= maxlen; /* remaining bytes */
736 if (count) {
737 dst = df->data; /* start of buffer */
738 src += maxlen; /* new position */
739 memcpy(dst, src, count);
740 }
741 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
742 /* for next buffer */
743 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
744 /* new pos actual buffer */
745 df->f1 = new_f1; /* next frame */
746 dch->tx_idx = dch->tx_skb->len;
747 }
748
749 /*
750 * B-channel send routine
751 */
752 static void
753 hfcpci_fill_fifo(struct bchannel *bch)
754 {
755 struct hfc_pci *hc = bch->hw;
756 int maxlen, fcnt;
757 int count, new_z1;
758 struct bzfifo *bz;
759 u_char *bdata;
760 u_char new_f1, *src, *dst;
761 __le16 *z1t, *z2t;
762
763 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
764 printk(KERN_DEBUG "%s\n", __func__);
765 if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
766 if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
767 !test_bit(FLG_TRANSPARENT, &bch->Flags))
768 return;
769 count = HFCPCI_FILLEMPTY;
770 } else {
771 count = bch->tx_skb->len - bch->tx_idx;
772 }
773 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
774 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
775 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
776 } else {
777 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
778 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
779 }
780
781 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
782 z1t = &bz->za[MAX_B_FRAMES].z1;
783 z2t = z1t + 1;
784 if (bch->debug & DEBUG_HW_BCHANNEL)
785 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
786 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
787 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
788 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
789 if (fcnt <= 0)
790 fcnt += B_FIFO_SIZE;
791 if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
792 /* fcnt contains available bytes in fifo */
793 if (count > fcnt)
794 count = fcnt;
795 new_z1 = le16_to_cpu(*z1t) + count;
796 /* new buffer Position */
797 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
798 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
799 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
800 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
801 /* end of fifo */
802 if (bch->debug & DEBUG_HW_BFIFO)
803 printk(KERN_DEBUG "hfcpci_FFt fillempty "
804 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
805 fcnt, maxlen, new_z1, dst);
806 if (maxlen > count)
807 maxlen = count; /* limit size */
808 memset(dst, bch->fill[0], maxlen); /* first copy */
809 count -= maxlen; /* remaining bytes */
810 if (count) {
811 dst = bdata; /* start of buffer */
812 memset(dst, bch->fill[0], count);
813 }
814 *z1t = cpu_to_le16(new_z1); /* now send data */
815 return;
816 }
817 /* fcnt contains available bytes in fifo */
818 fcnt = B_FIFO_SIZE - fcnt;
819 /* remaining bytes to send (bytes in fifo) */
820
821 next_t_frame:
822 count = bch->tx_skb->len - bch->tx_idx;
823 /* maximum fill shall be poll*2 */
824 if (count > (poll << 1) - fcnt)
825 count = (poll << 1) - fcnt;
826 if (count <= 0)
827 return;
828 /* data is suitable for fifo */
829 new_z1 = le16_to_cpu(*z1t) + count;
830 /* new buffer Position */
831 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
832 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
833 src = bch->tx_skb->data + bch->tx_idx;
834 /* source pointer */
835 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
836 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
837 /* end of fifo */
838 if (bch->debug & DEBUG_HW_BFIFO)
839 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
840 "maxl(%d) nz1(%x) dst(%p)\n",
841 fcnt, maxlen, new_z1, dst);
842 fcnt += count;
843 bch->tx_idx += count;
844 if (maxlen > count)
845 maxlen = count; /* limit size */
846 memcpy(dst, src, maxlen); /* first copy */
847 count -= maxlen; /* remaining bytes */
848 if (count) {
849 dst = bdata; /* start of buffer */
850 src += maxlen; /* new position */
851 memcpy(dst, src, count);
852 }
853 *z1t = cpu_to_le16(new_z1); /* now send data */
854 if (bch->tx_idx < bch->tx_skb->len)
855 return;
856 dev_kfree_skb(bch->tx_skb);
857 if (get_next_bframe(bch))
858 goto next_t_frame;
859 return;
860 }
861 if (bch->debug & DEBUG_HW_BCHANNEL)
862 printk(KERN_DEBUG
863 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
864 __func__, bch->nr, bz->f1, bz->f2,
865 bz->za[bz->f1].z1);
866 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
867 if (fcnt < 0)
868 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
869 if (fcnt > (MAX_B_FRAMES - 1)) {
870 if (bch->debug & DEBUG_HW_BCHANNEL)
871 printk(KERN_DEBUG
872 "hfcpci_fill_Bfifo more as 14 frames\n");
873 return;
874 }
875 /* now determine free bytes in FIFO buffer */
876 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
877 le16_to_cpu(bz->za[bz->f1].z1) - 1;
878 if (maxlen <= 0)
879 maxlen += B_FIFO_SIZE; /* count now contains available bytes */
880
881 if (bch->debug & DEBUG_HW_BCHANNEL)
882 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
883 bch->nr, count, maxlen);
884
885 if (maxlen < count) {
886 if (bch->debug & DEBUG_HW_BCHANNEL)
887 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
888 return;
889 }
890 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
891 /* new buffer Position */
892 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
893 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
894
895 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
896 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
897 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
898 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
899 /* end fifo */
900 if (maxlen > count)
901 maxlen = count; /* limit size */
902 memcpy(dst, src, maxlen); /* first copy */
903
904 count -= maxlen; /* remaining bytes */
905 if (count) {
906 dst = bdata; /* start of buffer */
907 src += maxlen; /* new position */
908 memcpy(dst, src, count);
909 }
910 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
911 bz->f1 = new_f1; /* next frame */
912 dev_kfree_skb(bch->tx_skb);
913 get_next_bframe(bch);
914 }
915
916
917
918 /*
919 * handle L1 state changes TE
920 */
921
922 static void
923 ph_state_te(struct dchannel *dch)
924 {
925 if (dch->debug)
926 printk(KERN_DEBUG "%s: TE newstate %x\n",
927 __func__, dch->state);
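	/*
	 * dch->state holds the S/T F-state (ITU-T I.430) reported by the
	 * chip: F0 reset, F3 deactivated, F6 synchronized (INFO2),
	 * F7 activated (INFO4); F5/F8 are signalled as ANYSIGNAL.
	 */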
928 switch (dch->state) {
929 case 0:
930 l1_event(dch->l1, HW_RESET_IND);
931 break;
932 case 3:
933 l1_event(dch->l1, HW_DEACT_IND);
934 break;
935 case 5:
936 case 8:
937 l1_event(dch->l1, ANYSIGNAL);
938 break;
939 case 6:
940 l1_event(dch->l1, INFO2);
941 break;
942 case 7:
943 l1_event(dch->l1, INFO4_P8);
944 break;
945 }
946 }
947
948 /*
949 * handle L1 state changes NT
950 */
951
952 static void
953 handle_nt_timer3(struct dchannel *dch) {
954 struct hfc_pci *hc = dch->hw;
955
956 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
957 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
958 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
959 hc->hw.nt_timer = 0;
960 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
961 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
962 hc->hw.mst_m |= HFCPCI_MASTER;
963 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
964 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
965 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
966 }
967
968 static void
969 ph_state_nt(struct dchannel *dch)
970 {
971 struct hfc_pci *hc = dch->hw;
972
973 if (dch->debug)
974 printk(KERN_DEBUG "%s: NT newstate %x\n",
975 __func__, dch->state);
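	/*
	 * dch->state holds the NT side G-state: G1 deactivated, G2 pending
	 * activation, G3 active, G4 pending deactivation.
	 */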
976 switch (dch->state) {
977 case 2:
978 if (hc->hw.nt_timer < 0) {
979 hc->hw.nt_timer = 0;
980 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
981 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
982 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
983 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
984 /* Clear already pending ints */
985 (void) Read_hfc(hc, HFCPCI_INT_S1);
986 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
987 udelay(10);
988 Write_hfc(hc, HFCPCI_STATES, 4);
989 dch->state = 4;
990 } else if (hc->hw.nt_timer == 0) {
991 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
992 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
993 hc->hw.nt_timer = NT_T1_COUNT;
994 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
995 hc->hw.ctmt |= HFCPCI_TIM3_125;
996 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
997 HFCPCI_CLTIMER);
998 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
999 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1000 /* allow G2 -> G3 transition */
1001 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
1002 } else {
1003 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
1004 }
1005 break;
1006 case 1:
1007 hc->hw.nt_timer = 0;
1008 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1009 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1010 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1011 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1012 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1013 hc->hw.mst_m &= ~HFCPCI_MASTER;
1014 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1015 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1016 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1017 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1018 break;
1019 case 4:
1020 hc->hw.nt_timer = 0;
1021 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1022 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1023 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1024 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1025 break;
1026 case 3:
1027 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1028 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1029 &dch->Flags)) {
1030 handle_nt_timer3(dch);
1031 break;
1032 }
1033 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1034 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1035 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1036 hc->hw.nt_timer = NT_T3_COUNT;
1037 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1038 hc->hw.ctmt |= HFCPCI_TIM3_125;
1039 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1040 HFCPCI_CLTIMER);
1041 }
1042 break;
1043 }
1044 }
1045
1046 static void
1047 ph_state(struct dchannel *dch)
1048 {
1049 struct hfc_pci *hc = dch->hw;
1050
1051 if (hc->hw.protocol == ISDN_P_NT_S0) {
1052 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1053 hc->hw.nt_timer < 0)
1054 handle_nt_timer3(dch);
1055 else
1056 ph_state_nt(dch);
1057 } else
1058 ph_state_te(dch);
1059 }
1060
1061 /*
1062 * Layer 1 callback function
1063 */
1064 static int
1065 hfc_l1callback(struct dchannel *dch, u_int cmd)
1066 {
1067 struct hfc_pci *hc = dch->hw;
1068
1069 switch (cmd) {
1070 case INFO3_P8:
1071 case INFO3_P10:
1072 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1073 hc->hw.mst_m |= HFCPCI_MASTER;
1074 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1075 break;
1076 case HW_RESET_REQ:
1077 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1078 /* HFC ST 3 */
1079 udelay(6);
1080 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
1081 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1082 hc->hw.mst_m |= HFCPCI_MASTER;
1083 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1084 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1085 HFCPCI_DO_ACTION);
1086 l1_event(dch->l1, HW_POWERUP_IND);
1087 break;
1088 case HW_DEACT_REQ:
1089 hc->hw.mst_m &= ~HFCPCI_MASTER;
1090 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1091 skb_queue_purge(&dch->squeue);
1092 if (dch->tx_skb) {
1093 dev_kfree_skb(dch->tx_skb);
1094 dch->tx_skb = NULL;
1095 }
1096 dch->tx_idx = 0;
1097 if (dch->rx_skb) {
1098 dev_kfree_skb(dch->rx_skb);
1099 dch->rx_skb = NULL;
1100 }
1101 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1102 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1103 del_timer(&dch->timer);
1104 break;
1105 case HW_POWERUP_REQ:
1106 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1107 break;
1108 case PH_ACTIVATE_IND:
1109 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1110 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1111 GFP_ATOMIC);
1112 break;
1113 case PH_DEACTIVATE_IND:
1114 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1115 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1116 GFP_ATOMIC);
1117 break;
1118 default:
1119 if (dch->debug & DEBUG_HW)
1120 printk(KERN_DEBUG "%s: unknown command %x\n",
1121 __func__, cmd);
1122 return -1;
1123 }
1124 return 0;
1125 }
1126
1127 /*
1128 * Interrupt handler
1129 */
1130 static inline void
1131 tx_birq(struct bchannel *bch)
1132 {
1133 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1134 hfcpci_fill_fifo(bch);
1135 else {
1136 if (bch->tx_skb)
1137 dev_kfree_skb(bch->tx_skb);
1138 if (get_next_bframe(bch))
1139 hfcpci_fill_fifo(bch);
1140 }
1141 }
1142
1143 static inline void
1144 tx_dirq(struct dchannel *dch)
1145 {
1146 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1147 hfcpci_fill_dfifo(dch->hw);
1148 else {
1149 if (dch->tx_skb)
1150 dev_kfree_skb(dch->tx_skb);
1151 if (get_next_dframe(dch))
1152 hfcpci_fill_dfifo(dch->hw);
1153 }
1154 }
1155
1156 static irqreturn_t
1157 hfcpci_int(int intno, void *dev_id)
1158 {
1159 struct hfc_pci *hc = dev_id;
1160 u_char exval;
1161 struct bchannel *bch;
1162 u_char val, stat;
1163
1164 spin_lock(&hc->lock);
1165 if (!(hc->hw.int_m2 & 0x08)) {
1166 spin_unlock(&hc->lock);
1167 return IRQ_NONE; /* not initialised */
1168 }
1169 stat = Read_hfc(hc, HFCPCI_STATUS);
1170 if (HFCPCI_ANYINT & stat) {
1171 val = Read_hfc(hc, HFCPCI_INT_S1);
1172 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1173 printk(KERN_DEBUG
1174 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1175 } else {
1176 /* shared */
1177 spin_unlock(&hc->lock);
1178 return IRQ_NONE;
1179 }
1180 hc->irqcnt++;
1181
1182 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1183 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1184 val &= hc->hw.int_m1;
1185 if (val & 0x40) { /* state machine irq */
1186 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1187 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1188 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1189 hc->dch.state, exval);
1190 hc->dch.state = exval;
1191 schedule_event(&hc->dch, FLG_PHCHANGE);
1192 val &= ~0x40;
1193 }
1194 if (val & 0x80) { /* timer irq */
1195 if (hc->hw.protocol == ISDN_P_NT_S0) {
1196 if ((--hc->hw.nt_timer) < 0)
1197 schedule_event(&hc->dch, FLG_PHCHANGE);
1198 }
1199 val &= ~0x80;
1200 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1201 }
1202 if (val & 0x08) { /* B1 rx */
1203 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1204 if (bch)
1205 main_rec_hfcpci(bch);
1206 else if (hc->dch.debug)
1207 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1208 }
1209 if (val & 0x10) { /* B2 rx */
1210 bch = Sel_BCS(hc, 2);
1211 if (bch)
1212 main_rec_hfcpci(bch);
1213 else if (hc->dch.debug)
1214 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1215 }
1216 if (val & 0x01) { /* B1 tx */
1217 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1218 if (bch)
1219 tx_birq(bch);
1220 else if (hc->dch.debug)
1221 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1222 }
1223 if (val & 0x02) { /* B2 tx */
1224 bch = Sel_BCS(hc, 2);
1225 if (bch)
1226 tx_birq(bch);
1227 else if (hc->dch.debug)
1228 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1229 }
1230 if (val & 0x20) /* D rx */
1231 receive_dmsg(hc);
1232 if (val & 0x04) { /* D tx */
1233 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1234 del_timer(&hc->dch.timer);
1235 tx_dirq(&hc->dch);
1236 }
1237 spin_unlock(&hc->lock);
1238 return IRQ_HANDLED;
1239 }
1240
1241 /*
1242 * timer callback for D-chan busy resolution. Currently no function
1243 */
1244 static void
1245 hfcpci_dbusy_timer(struct timer_list *t)
1246 {
1247 }
1248
1249 /*
1250 * activate/deactivate hardware for selected channels and mode
1251 */
1252 static int
1253 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1254 {
1255 struct hfc_pci *hc = bch->hw;
1256 int fifo2;
1257 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1258
1259 if (bch->debug & DEBUG_HW_BCHANNEL)
1260 printk(KERN_DEBUG
1261 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1262 bch->state, protocol, bch->nr, bc);
1263
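	/*
	 * The bc argument may also carry PCM slot information:
	 * bits 31-24 pcm mode, bits 23-16 tx slot, bits 15-8 rx slot,
	 * bits 7-0 the B-channel number (decoded just below).
	 */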
1264 fifo2 = bc;
1265 pcm_mode = (bc >> 24) & 0xff;
1266 if (pcm_mode) { /* PCM SLOT USE */
1267 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1268 printk(KERN_WARNING
1269 "%s: pcm channel id without HFC_CFG_PCM\n",
1270 __func__);
1271 rx_slot = (bc >> 8) & 0xff;
1272 tx_slot = (bc >> 16) & 0xff;
1273 bc = bc & 0xff;
1274 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1275 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1276 __func__);
1277 if (hc->chanlimit > 1) {
1278 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1279 hc->hw.sctrl_e &= ~0x80;
1280 } else {
1281 if (bc & 2) {
1282 if (protocol != ISDN_P_NONE) {
1283 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1284 hc->hw.sctrl_e |= 0x80;
1285 } else {
1286 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1287 hc->hw.sctrl_e &= ~0x80;
1288 }
1289 fifo2 = 1;
1290 } else {
1291 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1292 hc->hw.sctrl_e &= ~0x80;
1293 }
1294 }
1295 switch (protocol) {
1296 case (-1): /* used for init */
1297 bch->state = -1;
1298 bch->nr = bc;
1299 /* fall through */
1300 case (ISDN_P_NONE):
1301 if (bch->state == ISDN_P_NONE)
1302 return 0;
1303 if (bc & 2) {
1304 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1305 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1306 } else {
1307 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1308 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1309 }
1310 if (fifo2 & 2) {
1311 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1312 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
1313 HFCPCI_INTS_B2REC);
1314 } else {
1315 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1316 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
1317 HFCPCI_INTS_B1REC);
1318 }
1319 #ifdef REVERSE_BITORDER
1320 if (bch->nr & 2)
1321 hc->hw.cirm &= 0x7f;
1322 else
1323 hc->hw.cirm &= 0xbf;
1324 #endif
1325 bch->state = ISDN_P_NONE;
1326 bch->nr = bc;
1327 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1328 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1329 break;
1330 case (ISDN_P_B_RAW):
1331 bch->state = protocol;
1332 bch->nr = bc;
1333 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1334 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1335 if (bc & 2) {
1336 hc->hw.sctrl |= SCTRL_B2_ENA;
1337 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1338 #ifdef REVERSE_BITORDER
1339 hc->hw.cirm |= 0x80;
1340 #endif
1341 } else {
1342 hc->hw.sctrl |= SCTRL_B1_ENA;
1343 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1344 #ifdef REVERSE_BITORDER
1345 hc->hw.cirm |= 0x40;
1346 #endif
1347 }
1348 if (fifo2 & 2) {
1349 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1350 if (!tics)
1351 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1352 HFCPCI_INTS_B2REC);
1353 hc->hw.ctmt |= 2;
1354 hc->hw.conn &= ~0x18;
1355 } else {
1356 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1357 if (!tics)
1358 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1359 HFCPCI_INTS_B1REC);
1360 hc->hw.ctmt |= 1;
1361 hc->hw.conn &= ~0x03;
1362 }
1363 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1364 break;
1365 case (ISDN_P_B_HDLC):
1366 bch->state = protocol;
1367 bch->nr = bc;
1368 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1369 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1370 if (bc & 2) {
1371 hc->hw.sctrl |= SCTRL_B2_ENA;
1372 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1373 } else {
1374 hc->hw.sctrl |= SCTRL_B1_ENA;
1375 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1376 }
1377 if (fifo2 & 2) {
1378 hc->hw.last_bfifo_cnt[1] = 0;
1379 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1380 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1381 HFCPCI_INTS_B2REC);
1382 hc->hw.ctmt &= ~2;
1383 hc->hw.conn &= ~0x18;
1384 } else {
1385 hc->hw.last_bfifo_cnt[0] = 0;
1386 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1387 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1388 HFCPCI_INTS_B1REC);
1389 hc->hw.ctmt &= ~1;
1390 hc->hw.conn &= ~0x03;
1391 }
1392 test_and_set_bit(FLG_HDLC, &bch->Flags);
1393 break;
1394 default:
1395 printk(KERN_DEBUG "prot not known %x\n", protocol);
1396 return -ENOPROTOOPT;
1397 }
1398 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1399 if ((protocol == ISDN_P_NONE) ||
1400 (protocol == -1)) { /* init case */
1401 rx_slot = 0;
1402 tx_slot = 0;
1403 } else {
1404 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1405 rx_slot |= 0xC0;
1406 tx_slot |= 0xC0;
1407 } else {
1408 rx_slot |= 0x80;
1409 tx_slot |= 0x80;
1410 }
1411 }
1412 if (bc & 2) {
1413 hc->hw.conn &= 0xc7;
1414 hc->hw.conn |= 0x08;
1415 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1416 __func__, tx_slot);
1417 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1418 __func__, rx_slot);
1419 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1420 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1421 } else {
1422 hc->hw.conn &= 0xf8;
1423 hc->hw.conn |= 0x01;
1424 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1425 __func__, tx_slot);
1426 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1427 __func__, rx_slot);
1428 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1429 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1430 }
1431 }
1432 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1433 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1434 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1435 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1436 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1437 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1438 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1439 #ifdef REVERSE_BITORDER
1440 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1441 #endif
1442 return 0;
1443 }
1444
1445 static int
1446 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1447 {
1448 struct hfc_pci *hc = bch->hw;
1449
1450 if (bch->debug & DEBUG_HW_BCHANNEL)
1451 printk(KERN_DEBUG
1452 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1453 bch->state, protocol, bch->nr, chan);
1454 if (bch->nr != chan) {
1455 printk(KERN_DEBUG
1456 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1457 bch->nr, chan);
1458 return -EINVAL;
1459 }
1460 switch (protocol) {
1461 case (ISDN_P_B_RAW):
1462 bch->state = protocol;
1463 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1464 if (chan & 2) {
1465 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1466 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1467 if (!tics)
1468 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1469 hc->hw.ctmt |= 2;
1470 hc->hw.conn &= ~0x18;
1471 #ifdef REVERSE_BITORDER
1472 hc->hw.cirm |= 0x80;
1473 #endif
1474 } else {
1475 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1476 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1477 if (!tics)
1478 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1479 hc->hw.ctmt |= 1;
1480 hc->hw.conn &= ~0x03;
1481 #ifdef REVERSE_BITORDER
1482 hc->hw.cirm |= 0x40;
1483 #endif
1484 }
1485 break;
1486 case (ISDN_P_B_HDLC):
1487 bch->state = protocol;
1488 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1489 if (chan & 2) {
1490 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1491 hc->hw.last_bfifo_cnt[1] = 0;
1492 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1493 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1494 hc->hw.ctmt &= ~2;
1495 hc->hw.conn &= ~0x18;
1496 } else {
1497 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1498 hc->hw.last_bfifo_cnt[0] = 0;
1499 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1500 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1501 hc->hw.ctmt &= ~1;
1502 hc->hw.conn &= ~0x03;
1503 }
1504 break;
1505 default:
1506 printk(KERN_DEBUG "prot not known %x\n", protocol);
1507 return -ENOPROTOOPT;
1508 }
1509 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1510 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1511 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1512 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1513 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1514 #ifdef REVERSE_BITORDER
1515 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1516 #endif
1517 return 0;
1518 }
1519
1520 static void
1521 deactivate_bchannel(struct bchannel *bch)
1522 {
1523 struct hfc_pci *hc = bch->hw;
1524 u_long flags;
1525
1526 spin_lock_irqsave(&hc->lock, flags);
1527 mISDN_clear_bchannel(bch);
1528 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1529 spin_unlock_irqrestore(&hc->lock, flags);
1530 }
1531
1532 /*
1533 * Layer 1 B-channel hardware access
1534 */
1535 static int
1536 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1537 {
1538 return mISDN_ctrl_bchannel(bch, cq);
1539 }
1540 static int
1541 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1542 {
1543 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1544 struct hfc_pci *hc = bch->hw;
1545 int ret = -EINVAL;
1546 u_long flags;
1547
1548 if (bch->debug & DEBUG_HW)
1549 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1550 switch (cmd) {
1551 case HW_TESTRX_RAW:
1552 spin_lock_irqsave(&hc->lock, flags);
1553 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1554 spin_unlock_irqrestore(&hc->lock, flags);
1555 break;
1556 case HW_TESTRX_HDLC:
1557 spin_lock_irqsave(&hc->lock, flags);
1558 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1559 spin_unlock_irqrestore(&hc->lock, flags);
1560 break;
1561 case HW_TESTRX_OFF:
1562 spin_lock_irqsave(&hc->lock, flags);
1563 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1564 spin_unlock_irqrestore(&hc->lock, flags);
1565 ret = 0;
1566 break;
1567 case CLOSE_CHANNEL:
1568 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1569 deactivate_bchannel(bch);
1570 ch->protocol = ISDN_P_NONE;
1571 ch->peer = NULL;
1572 module_put(THIS_MODULE);
1573 ret = 0;
1574 break;
1575 case CONTROL_CHANNEL:
1576 ret = channel_bctrl(bch, arg);
1577 break;
1578 default:
1579 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1580 __func__, cmd);
1581 }
1582 return ret;
1583 }
1584
1585 /*
1586 * Layer2 -> Layer 1 Dchannel data
1587 */
1588 static int
1589 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1590 {
1591 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1592 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1593 struct hfc_pci *hc = dch->hw;
1594 int ret = -EINVAL;
1595 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1596 unsigned int id;
1597 u_long flags;
1598
1599 switch (hh->prim) {
1600 case PH_DATA_REQ:
1601 spin_lock_irqsave(&hc->lock, flags);
1602 ret = dchannel_senddata(dch, skb);
1603 if (ret > 0) { /* direct TX */
1604 id = hh->id; /* skb can be freed */
1605 hfcpci_fill_dfifo(dch->hw);
1606 ret = 0;
1607 spin_unlock_irqrestore(&hc->lock, flags);
1608 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1609 } else
1610 spin_unlock_irqrestore(&hc->lock, flags);
1611 return ret;
1612 case PH_ACTIVATE_REQ:
1613 spin_lock_irqsave(&hc->lock, flags);
1614 if (hc->hw.protocol == ISDN_P_NT_S0) {
1615 ret = 0;
1616 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1617 hc->hw.mst_m |= HFCPCI_MASTER;
1618 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1619 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1620 spin_unlock_irqrestore(&hc->lock, flags);
1621 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1622 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1623 break;
1624 }
1625 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1626 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1627 HFCPCI_DO_ACTION | 1);
1628 } else
1629 ret = l1_event(dch->l1, hh->prim);
1630 spin_unlock_irqrestore(&hc->lock, flags);
1631 break;
1632 case PH_DEACTIVATE_REQ:
1633 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1634 spin_lock_irqsave(&hc->lock, flags);
1635 if (hc->hw.protocol == ISDN_P_NT_S0) {
1636 struct sk_buff_head free_queue;
1637
1638 __skb_queue_head_init(&free_queue);
1639 /* prepare deactivation */
1640 Write_hfc(hc, HFCPCI_STATES, 0x40);
1641 skb_queue_splice_init(&dch->squeue, &free_queue);
1642 if (dch->tx_skb) {
1643 __skb_queue_tail(&free_queue, dch->tx_skb);
1644 dch->tx_skb = NULL;
1645 }
1646 dch->tx_idx = 0;
1647 if (dch->rx_skb) {
1648 __skb_queue_tail(&free_queue, dch->rx_skb);
1649 dch->rx_skb = NULL;
1650 }
1651 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1652 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1653 del_timer(&dch->timer);
1654 #ifdef FIXME
1655 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1656 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1657 #endif
1658 hc->hw.mst_m &= ~HFCPCI_MASTER;
1659 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1660 ret = 0;
1661 spin_unlock_irqrestore(&hc->lock, flags);
1662 __skb_queue_purge(&free_queue);
1663 } else {
1664 ret = l1_event(dch->l1, hh->prim);
1665 spin_unlock_irqrestore(&hc->lock, flags);
1666 }
1667 break;
1668 }
1669 if (!ret)
1670 dev_kfree_skb(skb);
1671 return ret;
1672 }
1673
1674 /*
1675 * Layer2 -> Layer 1 Bchannel data
1676 */
1677 static int
1678 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1679 {
1680 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1681 struct hfc_pci *hc = bch->hw;
1682 int ret = -EINVAL;
1683 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1684 unsigned long flags;
1685
1686 switch (hh->prim) {
1687 case PH_DATA_REQ:
1688 spin_lock_irqsave(&hc->lock, flags);
1689 ret = bchannel_senddata(bch, skb);
1690 if (ret > 0) { /* direct TX */
1691 hfcpci_fill_fifo(bch);
1692 ret = 0;
1693 }
1694 spin_unlock_irqrestore(&hc->lock, flags);
1695 return ret;
1696 case PH_ACTIVATE_REQ:
1697 spin_lock_irqsave(&hc->lock, flags);
1698 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1699 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1700 else
1701 ret = 0;
1702 spin_unlock_irqrestore(&hc->lock, flags);
1703 if (!ret)
1704 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1705 NULL, GFP_KERNEL);
1706 break;
1707 case PH_DEACTIVATE_REQ:
1708 deactivate_bchannel(bch);
1709 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1710 NULL, GFP_KERNEL);
1711 ret = 0;
1712 break;
1713 }
1714 if (!ret)
1715 dev_kfree_skb(skb);
1716 return ret;
1717 }
1718
1719 /*
1720 * called for card init message
1721 */
1722
1723 static void
1724 inithfcpci(struct hfc_pci *hc)
1725 {
1726 printk(KERN_DEBUG "inithfcpci: entered\n");
1727 timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
1728 hc->chanlimit = 2;
1729 mode_hfcpci(&hc->bch[0], 1, -1);
1730 mode_hfcpci(&hc->bch[1], 2, -1);
1731 }
1732
1733
1734 static int
1735 init_card(struct hfc_pci *hc)
1736 {
1737 int cnt = 3;
1738 u_long flags;
1739
1740 printk(KERN_DEBUG "init_card: entered\n");
1741
1742
1743 spin_lock_irqsave(&hc->lock, flags);
1744 disable_hwirq(hc);
1745 spin_unlock_irqrestore(&hc->lock, flags);
1746 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1747 printk(KERN_WARNING
1748 "mISDN: couldn't get interrupt %d\n", hc->irq);
1749 return -EIO;
1750 }
1751 spin_lock_irqsave(&hc->lock, flags);
1752 reset_hfcpci(hc);
1753 while (cnt) {
1754 inithfcpci(hc);
1755 /*
1756 * Finally enable IRQ output
1757 		 * this is only allowed if an IRQ routine is already
1758 * established for this HFC, so don't do that earlier
1759 */
1760 enable_hwirq(hc);
1761 spin_unlock_irqrestore(&hc->lock, flags);
1762 /* Timeout 80ms */
1763 set_current_state(TASK_UNINTERRUPTIBLE);
1764 schedule_timeout((80 * HZ) / 1000);
1765 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1766 hc->irq, hc->irqcnt);
1767 /* now switch timer interrupt off */
1768 spin_lock_irqsave(&hc->lock, flags);
1769 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1770 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1771 /* reinit mode reg */
1772 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1773 if (!hc->irqcnt) {
1774 printk(KERN_WARNING
1775 "HFC PCI: IRQ(%d) getting no interrupts "
1776 "during init %d\n", hc->irq, 4 - cnt);
1777 if (cnt == 1)
1778 break;
1779 else {
1780 reset_hfcpci(hc);
1781 cnt--;
1782 }
1783 } else {
1784 spin_unlock_irqrestore(&hc->lock, flags);
1785 hc->initdone = 1;
1786 return 0;
1787 }
1788 }
1789 disable_hwirq(hc);
1790 spin_unlock_irqrestore(&hc->lock, flags);
1791 free_irq(hc->irq, hc);
1792 return -EIO;
1793 }
1794
1795 static int
1796 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1797 {
1798 int ret = 0;
1799 u_char slot;
1800
1801 switch (cq->op) {
1802 case MISDN_CTRL_GETOP:
1803 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1804 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1805 break;
1806 case MISDN_CTRL_LOOP:
1807 		/* channel 0 disables the loop */
1808 if (cq->channel < 0 || cq->channel > 2) {
1809 ret = -EINVAL;
1810 break;
1811 }
1812 if (cq->channel & 1) {
1813 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1814 slot = 0xC0;
1815 else
1816 slot = 0x80;
1817 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1818 __func__, slot);
1819 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1820 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1821 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1822 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1823 }
1824 if (cq->channel & 2) {
1825 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1826 slot = 0xC1;
1827 else
1828 slot = 0x81;
1829 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1830 __func__, slot);
1831 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1832 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1833 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1834 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1835 }
1836 if (cq->channel & 3)
1837 hc->hw.trm |= 0x80; /* enable IOM-loop */
1838 else {
1839 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1840 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1841 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1842 }
1843 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1844 break;
1845 case MISDN_CTRL_CONNECT:
1846 if (cq->channel == cq->p1) {
1847 ret = -EINVAL;
1848 break;
1849 }
1850 if (cq->channel < 1 || cq->channel > 2 ||
1851 cq->p1 < 1 || cq->p1 > 2) {
1852 ret = -EINVAL;
1853 break;
1854 }
1855 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1856 slot = 0xC0;
1857 else
1858 slot = 0x80;
1859 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1860 __func__, slot);
1861 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1862 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1863 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1864 slot = 0xC1;
1865 else
1866 slot = 0x81;
1867 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1868 __func__, slot);
1869 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1870 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1871 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1872 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1873 hc->hw.trm |= 0x80;
1874 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1875 break;
1876 case MISDN_CTRL_DISCONNECT:
1877 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1878 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1879 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1880 break;
1881 case MISDN_CTRL_L1_TIMER3:
1882 ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1883 break;
1884 default:
1885 printk(KERN_WARNING "%s: unknown Op %x\n",
1886 __func__, cq->op);
1887 ret = -EINVAL;
1888 break;
1889 }
1890 return ret;
1891 }
1892
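/*
 * open_dchannel(): the first open of the D-channel triggers the full
 * hardware bring-up via init_card(); for ISDN_P_TE_S0 an L1 state
 * machine is created first.  A later open with a different protocol
 * (TE <-> NT) closes the old L1 instance, if any, and reprograms the
 * chip via hfcpci_setmode().  If layer 1 is already activated
 * (state 3 in NT mode, state 7 in TE mode) a PH_ACTIVATE_IND is
 * queued immediately.
 */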
1893 static int
1894 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1895 struct channel_req *rq)
1896 {
1897 int err = 0;
1898
1899 if (debug & DEBUG_HW_OPEN)
1900 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1901 hc->dch.dev.id, __builtin_return_address(0));
1902 if (rq->protocol == ISDN_P_NONE)
1903 return -EINVAL;
1904 if (rq->adr.channel == 1) {
1905 /* TODO: E-Channel */
1906 return -EINVAL;
1907 }
1908 if (!hc->initdone) {
1909 if (rq->protocol == ISDN_P_TE_S0) {
1910 err = create_l1(&hc->dch, hfc_l1callback);
1911 if (err)
1912 return err;
1913 }
1914 hc->hw.protocol = rq->protocol;
1915 ch->protocol = rq->protocol;
1916 err = init_card(hc);
1917 if (err)
1918 return err;
1919 } else {
1920 if (rq->protocol != ch->protocol) {
1921 if (hc->hw.protocol == ISDN_P_TE_S0)
1922 l1_event(hc->dch.l1, CLOSE_CHANNEL);
1923 if (rq->protocol == ISDN_P_TE_S0) {
1924 err = create_l1(&hc->dch, hfc_l1callback);
1925 if (err)
1926 return err;
1927 }
1928 hc->hw.protocol = rq->protocol;
1929 ch->protocol = rq->protocol;
1930 hfcpci_setmode(hc);
1931 }
1932 }
1933
1934 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1935 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1936 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1937 0, NULL, GFP_KERNEL);
1938 }
1939 rq->ch = ch;
1940 if (!try_module_get(THIS_MODULE))
1941 printk(KERN_WARNING "%s: cannot get module\n", __func__);
1942 return 0;
1943 }
1944
1945 static int
1946 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1947 {
1948 struct bchannel *bch;
1949
1950 if (rq->adr.channel == 0 || rq->adr.channel > 2)
1951 return -EINVAL;
1952 if (rq->protocol == ISDN_P_NONE)
1953 return -EINVAL;
1954 bch = &hc->bch[rq->adr.channel - 1];
1955 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1956 return -EBUSY; /* b-channel can only be opened once */
1957 bch->ch.protocol = rq->protocol;
1958 rq->ch = &bch->ch; /* TODO: E-channel */
1959 if (!try_module_get(THIS_MODULE))
1960 printk(KERN_WARNING "%s: cannot get module\n", __func__);
1961 return 0;
1962 }
1963
1964 /*
1965 * device control function
1966 */
1967 static int
1968 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1969 {
1970 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1971 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1972 struct hfc_pci *hc = dch->hw;
1973 struct channel_req *rq;
1974 int err = 0;
1975
1976 if (dch->debug & DEBUG_HW)
1977 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1978 __func__, cmd, arg);
1979 switch (cmd) {
1980 case OPEN_CHANNEL:
1981 rq = arg;
1982 if ((rq->protocol == ISDN_P_TE_S0) ||
1983 (rq->protocol == ISDN_P_NT_S0))
1984 err = open_dchannel(hc, ch, rq);
1985 else
1986 err = open_bchannel(hc, rq);
1987 break;
1988 case CLOSE_CHANNEL:
1989 if (debug & DEBUG_HW_OPEN)
1990 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1991 __func__, hc->dch.dev.id,
1992 __builtin_return_address(0));
1993 module_put(THIS_MODULE);
1994 break;
1995 case CONTROL_CHANNEL:
1996 err = channel_ctrl(hc, arg);
1997 break;
1998 default:
1999 if (dch->debug & DEBUG_HW)
2000 printk(KERN_DEBUG "%s: unknown command %x\n",
2001 __func__, cmd);
2002 return -EINVAL;
2003 }
2004 return err;
2005 }
2006
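/*
 * setup_hw(): per-card hardware setup.  The register window is taken
 * from PCI BAR 1 and remapped (256 bytes), a 32 KiB DMA-coherent
 * buffer is allocated for the FIFOs, and its bus address is written
 * to PCI config register 0x80, which the chip uses as the FIFO base.
 * Interrupts stay disabled here; the FIFOs are enabled later.
 */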
2007 static int
2008 setup_hw(struct hfc_pci *hc)
2009 {
2010 void *buffer;
2011
2012 printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
2013 hc->hw.cirm = 0;
2014 hc->dch.state = 0;
2015 pci_set_master(hc->pdev);
2016 if (!hc->irq) {
2017 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
2018 return 1;
2019 }
2020 hc->hw.pci_io =
2021 (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
2022
2023 if (!hc->hw.pci_io) {
2024 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
2025 return 1;
2026 }
2027 /* Allocate memory for FIFOS */
2028 /* the memory needs to be on a 32k boundary within the first 4G */
2029 pci_set_dma_mask(hc->pdev, 0xFFFF8000);
2030 buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
2031 /* We silently assume the address is okay if nonzero */
2032 if (!buffer) {
2033 printk(KERN_WARNING
2034 "HFC-PCI: Error allocating memory for FIFO!\n");
2035 return 1;
2036 }
2037 hc->hw.fifos = buffer;
2038 pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
2039 hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
2040 printk(KERN_INFO
2041 "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
2042 (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
2043 (u_long) hc->hw.dmahandle, hc->irq, HZ);
2044 /* enable memory mapped ports, disable busmaster */
2045 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
2046 hc->hw.int_m2 = 0;
2047 disable_hwirq(hc);
2048 hc->hw.int_m1 = 0;
2049 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
2050 /* At this point the needed PCI config is done */
2051 /* fifos are still not enabled */
2052 timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
2053 /* default PCM master */
2054 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2055 return 0;
2056 }
2057
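/*
 * release_card(): interrupts are switched off and both B-channels are
 * disabled under the card lock, the L1 instance is closed for TE
 * mode, the IRQ is freed, and only then are the I/O mapping and FIFO
 * memory released (release_io_hfcpci() must run after free_irq()).
 */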
2058 static void
2059 release_card(struct hfc_pci *hc) {
2060 u_long flags;
2061
2062 spin_lock_irqsave(&hc->lock, flags);
2063 hc->hw.int_m2 = 0; /* interrupt output off ! */
2064 disable_hwirq(hc);
2065 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2066 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2067 if (hc->dch.timer.function != NULL) {
2068 del_timer(&hc->dch.timer);
2069 hc->dch.timer.function = NULL;
2070 }
2071 spin_unlock_irqrestore(&hc->lock, flags);
2072 if (hc->hw.protocol == ISDN_P_TE_S0)
2073 l1_event(hc->dch.l1, CLOSE_CHANNEL);
2074 if (hc->initdone)
2075 free_irq(hc->irq, hc);
2076 release_io_hfcpci(hc); /* must release after free_irq! */
2077 mISDN_unregister_device(&hc->dch.dev);
2078 mISDN_freebchannel(&hc->bch[1]);
2079 mISDN_freebchannel(&hc->bch[0]);
2080 mISDN_freedchannel(&hc->dch);
2081 pci_set_drvdata(hc->pdev, NULL);
2082 kfree(hc);
2083 }
2084
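/*
 * setup_card(): initialise the mISDN D-channel and both B-channels
 * (RAW and HDLC protocols), hook up the send/ctrl callbacks, run the
 * hardware setup and register the device as "hfc-pci.<n>".
 */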
2085 static int
2086 setup_card(struct hfc_pci *card)
2087 {
2088 int err = -EINVAL;
2089 u_int i;
2090 char name[MISDN_MAX_IDLEN];
2091
2092 card->dch.debug = debug;
2093 spin_lock_init(&card->lock);
2094 mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2095 card->dch.hw = card;
2096 card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2097 card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2098 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2099 card->dch.dev.D.send = hfcpci_l2l1D;
2100 card->dch.dev.D.ctrl = hfc_dctrl;
2101 card->dch.dev.nrbchan = 2;
2102 for (i = 0; i < 2; i++) {
2103 card->bch[i].nr = i + 1;
2104 set_channelmap(i + 1, card->dch.dev.channelmap);
2105 card->bch[i].debug = debug;
2106 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
2107 card->bch[i].hw = card;
2108 card->bch[i].ch.send = hfcpci_l2l1B;
2109 card->bch[i].ch.ctrl = hfc_bctrl;
2110 card->bch[i].ch.nr = i + 1;
2111 list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2112 }
2113 err = setup_hw(card);
2114 if (err)
2115 goto error;
2116 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2117 err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
2118 if (err)
2119 goto error;
2120 HFC_cnt++;
2121 printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2122 return 0;
2123 error:
2124 mISDN_freebchannel(&card->bch[1]);
2125 mISDN_freebchannel(&card->bch[0]);
2126 mISDN_freedchannel(&card->dch);
2127 kfree(card);
2128 return err;
2129 }
2130
2131 /* private data in the PCI devices list */
2132 struct _hfc_map {
2133 u_int subtype;
2134 u_int flag;
2135 char *name;
2136 };
2137
2138 static const struct _hfc_map hfc_map[] =
2139 {
2140 {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2141 {HFC_CCD_B000, 0, "Billion B000"},
2142 {HFC_CCD_B006, 0, "Billion B006"},
2143 {HFC_CCD_B007, 0, "Billion B007"},
2144 {HFC_CCD_B008, 0, "Billion B008"},
2145 {HFC_CCD_B009, 0, "Billion B009"},
2146 {HFC_CCD_B00A, 0, "Billion B00A"},
2147 {HFC_CCD_B00B, 0, "Billion B00B"},
2148 {HFC_CCD_B00C, 0, "Billion B00C"},
2149 {HFC_CCD_B100, 0, "Seyeon B100"},
2150 {HFC_CCD_B700, 0, "Primux II S0 B700"},
2151 {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2152 {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2153 {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2154 {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2155 {HFC_BERKOM_A1T, 0, "German telekom A1T"},
2156 {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2157 {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2158 {HFC_DIGI_DF_M_IOM2_E, 0,
2159 "Digi International DataFire Micro V IOM2 (Europe)"},
2160 {HFC_DIGI_DF_M_E, 0,
2161 "Digi International DataFire Micro V (Europe)"},
2162 {HFC_DIGI_DF_M_IOM2_A, 0,
2163 "Digi International DataFire Micro V IOM2 (North America)"},
2164 {HFC_DIGI_DF_M_A, 0,
2165 "Digi International DataFire Micro V (North America)"},
2166 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2167 {},
2168 };
2169
2170 static const struct pci_device_id hfc_ids[] =
2171 {
2172 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
2173 (unsigned long) &hfc_map[0] },
2174 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
2175 (unsigned long) &hfc_map[1] },
2176 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
2177 (unsigned long) &hfc_map[2] },
2178 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
2179 (unsigned long) &hfc_map[3] },
2180 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
2181 (unsigned long) &hfc_map[4] },
2182 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
2183 (unsigned long) &hfc_map[5] },
2184 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
2185 (unsigned long) &hfc_map[6] },
2186 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
2187 (unsigned long) &hfc_map[7] },
2188 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
2189 (unsigned long) &hfc_map[8] },
2190 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
2191 (unsigned long) &hfc_map[9] },
2192 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
2193 (unsigned long) &hfc_map[10] },
2194 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
2195 (unsigned long) &hfc_map[11] },
2196 { PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
2197 (unsigned long) &hfc_map[12] },
2198 { PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
2199 (unsigned long) &hfc_map[13] },
2200 { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
2201 (unsigned long) &hfc_map[14] },
2202 { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
2203 (unsigned long) &hfc_map[15] },
2204 { PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
2205 (unsigned long) &hfc_map[16] },
2206 { PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
2207 (unsigned long) &hfc_map[17] },
2208 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
2209 (unsigned long) &hfc_map[18] },
2210 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
2211 (unsigned long) &hfc_map[19] },
2212 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
2213 (unsigned long) &hfc_map[20] },
2214 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
2215 (unsigned long) &hfc_map[21] },
2216 { PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
2217 (unsigned long) &hfc_map[22] },
2218 {},
2219 };
2220
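/*
 * hfc_probe(): ent->driver_data carries a pointer to the matching
 * hfc_map[] entry (set up in hfc_ids above), so the subtype and a
 * human-readable card name are available without a second lookup.
 */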
2221 static int
2222 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2223 {
2224 int err = -ENOMEM;
2225 struct hfc_pci *card;
2226 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
2227
2228 card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
2229 if (!card) {
2230 printk(KERN_ERR "No kmem for HFC card\n");
2231 return err;
2232 }
2233 card->pdev = pdev;
2234 card->subtype = m->subtype;
2235 err = pci_enable_device(pdev);
2236 if (err) {
2237 kfree(card);
2238 return err;
2239 }
2240
2241 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2242 m->name, pci_name(pdev));
2243
2244 card->irq = pdev->irq;
2245 pci_set_drvdata(pdev, card);
2246 err = setup_card(card);
2247 if (err)
2248 pci_set_drvdata(pdev, NULL);
2249 return err;
2250 }
2251
2252 static void
2253 hfc_remove_pci(struct pci_dev *pdev)
2254 {
2255 struct hfc_pci *card = pci_get_drvdata(pdev);
2256
2257 if (card)
2258 release_card(card);
2259 else
2260 if (debug)
2261 printk(KERN_DEBUG "%s: drvdata already removed\n",
2262 __func__);
2263 }
2264
2265
2266 static struct pci_driver hfc_driver = {
2267 .name = "hfcpci",
2268 .probe = hfc_probe,
2269 .remove = hfc_remove_pci,
2270 .id_table = hfc_ids,
2271 };
2272
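/*
 * Software polling path: when a non-default poll value is configured
 * (see HFC_init() below), the global kernel timer calls
 * _hfcpci_softirq() for every registered hfc-pci device and services
 * the transparent (ISDN_P_B_RAW) B-channel FIFOs of cards whose
 * interrupt output is enabled.
 */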
2273 static int
2274 _hfcpci_softirq(struct device *dev, void *unused)
2275 {
2276 struct hfc_pci *hc = dev_get_drvdata(dev);
2277 struct bchannel *bch;
2278 if (hc == NULL)
2279 return 0;
2280
2281 if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
2282 spin_lock(&hc->lock);
2283 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
2284 if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
2285 main_rec_hfcpci(bch);
2286 tx_birq(bch);
2287 }
2288 bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
2289 if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
2290 main_rec_hfcpci(bch);
2291 tx_birq(bch);
2292 }
2293 spin_unlock(&hc->lock);
2294 }
2295 return 0;
2296 }
2297
2298 static void
2299 hfcpci_softirq(struct timer_list *unused)
2300 {
2301 WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
2302 _hfcpci_softirq) != 0);
2303
2304 /* if next event would be in the past ... */
2305 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
2306 hfc_jiffies = jiffies + 1;
2307 else
2308 hfc_jiffies += tics;
2309 hfc_tl.expires = hfc_jiffies;
2310 add_timer(&hfc_tl);
2311 }
2312
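/*
 * HFC_init(): a user-supplied poll value is converted to a timer
 * period in jiffies and then rounded back, e.g. with HZ=250 and
 * poll=64: tics = 64 * 250 / 8000 = 2 jiffies, poll = 2 * 8000 / 250
 * = 64 samples.  Values that round outside 8..256 are rejected.
 * With the default of HFCPCI_BTRANS_THRESHOLD samples no timer is
 * started and tics stays 0.
 */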
2313 static int __init
2314 HFC_init(void)
2315 {
2316 int err;
2317
2318 if (!poll)
2319 poll = HFCPCI_BTRANS_THRESHOLD;
2320
2321 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2322 tics = (poll * HZ) / 8000;
2323 if (tics < 1)
2324 tics = 1;
2325 poll = (tics * 8000) / HZ;
2326 if (poll > 256 || poll < 8) {
2327 printk(KERN_ERR "%s: Wrong poll value %d not in range "
2328 "of 8..256.\n", __func__, poll);
2329 err = -EINVAL;
2330 return err;
2331 }
2332 }
2333 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2334 printk(KERN_INFO "%s: Using alternative poll value of %d\n",
2335 __func__, poll);
2336 timer_setup(&hfc_tl, hfcpci_softirq, 0);
2337 hfc_tl.expires = jiffies + tics;
2338 hfc_jiffies = hfc_tl.expires;
2339 add_timer(&hfc_tl);
2340 } else
2341 tics = 0; /* indicate the use of controller's timer */
2342
2343 err = pci_register_driver(&hfc_driver);
2344 if (err) {
2345 if (timer_pending(&hfc_tl))
2346 del_timer(&hfc_tl);
2347 }
2348
2349 return err;
2350 }
2351
2352 static void __exit
2353 HFC_cleanup(void)
2354 {
2355 if (timer_pending(&hfc_tl))
2356 del_timer_sync(&hfc_tl);
2357
2358 pci_unregister_driver(&hfc_driver);
2359 }
2360
2361 module_init(HFC_init);
2362 module_exit(HFC_cleanup);
2363
2364 MODULE_DEVICE_TABLE(pci, hfc_ids);
2365