1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
2 *
3 * low level driver for CCD's hfc-pci based cards
4 *
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
15 *
16 */
17
18 #include <linux/init.h>
19 #include "hisax.h"
20 #include "hfc_pci.h"
21 #include "isdnl1.h"
22 #include <linux/pci.h>
23 #include <linux/sched.h>
24 #include <linux/interrupt.h>
25
26 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
27
28 /* table entry in the PCI devices list */
29 typedef struct {
30 int vendor_id;
31 int device_id;
32 char *vendor_name;
33 char *card_name;
34 } PCI_ENTRY;
35
36 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
37 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
38 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
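/*
 * With the 3.125 ms auto timer programmed in reset_hfcpci() this amounts
 * to roughly 20 * 3.125 ms = 62.5 ms of waiting in state G2 before the
 * NT side reports a timeout.
 */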
39
40 static const PCI_ENTRY id_list[] =
41 {
42 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
43 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
53 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
54 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
55 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
56 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
57 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
58 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
59 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
60 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, "Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, "Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, "Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, "Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
65 {0, 0, NULL, NULL},
66 };
67
68
69 /******************************************/
70 /* free hardware resources used by driver */
71 /******************************************/
72 static void
73 release_io_hfcpci(struct IsdnCardState *cs)
74 {
75 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
76 cs->hw.hfcpci.pci_io);
77 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
78 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
79 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
80 mdelay(10);
81 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
82 mdelay(10);
83 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
84 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
85 del_timer(&cs->hw.hfcpci.timer);
86 pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
87 cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
88 cs->hw.hfcpci.fifos = NULL;
89 iounmap((void *)cs->hw.hfcpci.pci_io);
90 }
91
92 /********************************************************************************/
93 /* function called to reset the HFC PCI chip. A complete software reset of chip */
94 /* and fifos is done. */
95 /********************************************************************************/
96 static void
97 reset_hfcpci(struct IsdnCardState *cs)
98 {
99 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
100 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
101 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
102
103 printk(KERN_INFO "HFC_PCI: resetting card\n");
104 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
105 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
106 mdelay(10);
107 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
108 mdelay(10);
109 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
110 printk(KERN_WARNING "HFC-PCI init bit busy\n");
111
112 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
113 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
114
115 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
116 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
117
118 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
119 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
120 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
121 cs->hw.hfcpci.bswapped = 0; /* no exchange */
122 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
123 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
124 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
125
126 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
127 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
128 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
129
130 /* Clear already pending ints */
131 if (Read_hfc(cs, HFCPCI_INT_S1));
132
133 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
134 udelay(10);
135 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
136 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
137
138 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
139 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
140 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
141 cs->hw.hfcpci.sctrl_r = 0;
142 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
143
144 /* Init GCI/IOM2 in master mode */
145 /* Slots 0 and 1 are set for B-chan 1 and 2 */
146 /* D- and monitor/CI channel are not enabled */
147 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
148 /* STIO2 is used as data input, B1+B2 from IOM->ST */
149 /* ST B-channel send disabled -> continuous 1s */
150 /* The IOM slots are always enabled */
151 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
152 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
153 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
154 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
155 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
156 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
157
158 /* Finally enable IRQ output */
159 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
160 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
161 if (Read_hfc(cs, HFCPCI_INT_S1));
162 }
163
164 /***************************************************/
165 /* Timer function called when kernel timer expires */
166 /***************************************************/
167 static void
168 hfcpci_Timer(struct timer_list *t)
169 {
170 struct IsdnCardState *cs = from_timer(cs, t, hw.hfcpci.timer);
171 cs->hw.hfcpci.timer.expires = jiffies + 75;
172 /* WD RESET */
173 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
174 add_timer(&cs->hw.hfcpci.timer);
175 */
176 }
177
178
179 /*********************************/
180 /* schedule a new D-channel task */
181 /*********************************/
182 static void
183 sched_event_D_pci(struct IsdnCardState *cs, int event)
184 {
185 test_and_set_bit(event, &cs->event);
186 schedule_work(&cs->tqueue);
187 }
188
189 /*********************************/
190 /* schedule a new b_channel task */
191 /*********************************/
192 static void
193 hfcpci_sched_event(struct BCState *bcs, int event)
194 {
195 test_and_set_bit(event, &bcs->event);
196 schedule_work(&bcs->tqueue);
197 }
198
199 /************************************************/
200 /* select a b-channel entry matching and active */
201 /************************************************/
202 static
203 struct BCState *
204 Sel_BCS(struct IsdnCardState *cs, int channel)
205 {
206 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
207 return (&cs->bcs[0]);
208 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
209 return (&cs->bcs[1]);
210 else
211 return (NULL);
212 }
213
214 /***************************************/
215 /* clear the desired B-channel rx fifo */
216 /***************************************/
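/*
 * Note on the clear logic used here and in hfcpci_clear_fifo_tx() below:
 * the FIFO is taken out of FIFO_EN while its F counters and Z pointers
 * are forced to equal values (an empty ring), then it is enabled again.
 */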
217 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
218 { u_char fifo_state;
219 bzfifo_type *bzr;
220
221 if (fifo) {
222 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
223 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
224 } else {
225 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
226 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
227 }
228 if (fifo_state)
229 cs->hw.hfcpci.fifo_en ^= fifo_state;
230 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
231 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
232 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
233 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
234 bzr->f1 = MAX_B_FRAMES;
235 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
236 if (fifo_state)
237 cs->hw.hfcpci.fifo_en |= fifo_state;
238 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
239 }
240
241 /***************************************/
242 /* clear the desired B-channel tx fifo */
243 /***************************************/
244 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
245 { u_char fifo_state;
246 bzfifo_type *bzt;
247
248 if (fifo) {
249 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
250 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
251 } else {
252 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
253 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
254 }
255 if (fifo_state)
256 cs->hw.hfcpci.fifo_en ^= fifo_state;
257 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
258 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
259 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
260 bzt->f1 = MAX_B_FRAMES;
261 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
262 if (fifo_state)
263 cs->hw.hfcpci.fifo_en |= fifo_state;
264 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
265 }
266
267 /*********************************************/
268 /* read a complete B-frame out of the buffer */
269 /*********************************************/
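/*
 * The byte count passed in covers the whole HDLC frame as stored by the
 * chip, i.e. payload plus what are presumably two CRC bytes and a status
 * byte (hence the "count - 3" below); a non-zero byte at Z1 is treated
 * as a CRC error.
 */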
270 static struct sk_buff
271 *
272 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count)
273 {
274 u_char *ptr, *ptr1, new_f2;
275 struct sk_buff *skb;
276 struct IsdnCardState *cs = bcs->cs;
277 int total, maxlen, new_z2;
278 z_type *zp;
279
280 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
281 debugl1(cs, "hfcpci_empty_fifo");
282 zp = &bz->za[bz->f2]; /* point to Z-Regs */
283 new_z2 = zp->z2 + count; /* new position in fifo */
284 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
285 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
286 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
287 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
288 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
289 if (cs->debug & L1_DEB_WARN)
290 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
291 #ifdef ERROR_STATISTIC
292 bcs->err_inv++;
293 #endif
294 bz->za[new_f2].z2 = new_z2;
295 bz->f2 = new_f2; /* next buffer */
296 skb = NULL;
297 } else if (!(skb = dev_alloc_skb(count - 3)))
298 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
299 else {
300 total = count;
301 count -= 3;
302 ptr = skb_put(skb, count);
303
304 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
305 maxlen = count; /* complete transfer */
306 else
307 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
308
309 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
310 memcpy(ptr, ptr1, maxlen); /* copy data */
311 count -= maxlen;
312
313 if (count) { /* rest remaining */
314 ptr += maxlen;
315 ptr1 = bdata; /* start of buffer */
316 memcpy(ptr, ptr1, count); /* rest */
317 }
318 bz->za[new_f2].z2 = new_z2;
319 bz->f2 = new_f2; /* next buffer */
320
321 }
322 return (skb);
323 }
324
325 /*******************************/
326 /* D-channel receive procedure */
327 /*******************************/
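/*
 * The D receive F counters always have the bit above D_FREG_MASK set;
 * masking with D_FREG_MASK yields the descriptor index, and the
 * "| (MAX_D_FRAMES + 1)" below restores that bit whenever the counter
 * is advanced.
 */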
328 static
329 int
330 receive_dmsg(struct IsdnCardState *cs)
331 {
332 struct sk_buff *skb;
333 int maxlen;
334 int rcnt, total;
335 int count = 5;
336 u_char *ptr, *ptr1;
337 dfifo_type *df;
338 z_type *zp;
339
340 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
341 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
342 debugl1(cs, "rec_dmsg blocked");
343 return (1);
344 }
345 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
346 zp = &df->za[df->f2 & D_FREG_MASK];
347 rcnt = zp->z1 - zp->z2;
348 if (rcnt < 0)
349 rcnt += D_FIFO_SIZE;
350 rcnt++;
351 if (cs->debug & L1_DEB_ISAC)
352 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
353 df->f1, df->f2, zp->z1, zp->z2, rcnt);
354
355 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
356 (df->data[zp->z1])) {
357 if (cs->debug & L1_DEB_WARN)
358 debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
359 #ifdef ERROR_STATISTIC
360 cs->err_rx++;
361 #endif
362 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
363 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
364 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
365 total = rcnt;
366 rcnt -= 3;
367 ptr = skb_put(skb, rcnt);
368
369 if (zp->z2 + rcnt <= D_FIFO_SIZE)
370 maxlen = rcnt; /* complete transfer */
371 else
372 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
373
374 ptr1 = df->data + zp->z2; /* start of data */
375 memcpy(ptr, ptr1, maxlen); /* copy data */
376 rcnt -= maxlen;
377
378 if (rcnt) { /* rest remaining */
379 ptr += maxlen;
380 ptr1 = df->data; /* start of buffer */
381 memcpy(ptr, ptr1, rcnt); /* rest */
382 }
383 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
384 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
385
386 skb_queue_tail(&cs->rq, skb);
387 sched_event_D_pci(cs, D_RCVBUFREADY);
388 } else
389 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
390 }
391 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
392 return (1);
393 }
394
395 /*******************************************************************************/
396 /* check for transparent receive data and read max one threshold size if avail */
397 /*******************************************************************************/
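/*
 * Transparent B-channel data has no frame structure, so only the Z
 * pointers are used: Z1 is advanced by the chip, Z2 by the driver.
 * At most one chunk of HFCPCI_BTRANS_THRESHOLD bytes is delivered per
 * call.
 */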
398 static int
399 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type *bz, u_char *bdata)
400 {
401 unsigned short *z1r, *z2r;
402 int new_z2, fcnt, maxlen;
403 struct sk_buff *skb;
404 u_char *ptr, *ptr1;
405
406 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
407 z2r = z1r + 1;
408
409 if (!(fcnt = *z1r - *z2r))
410 return (0); /* no data avail */
411
412 if (fcnt <= 0)
413 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
414 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
415 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
416
417 new_z2 = *z2r + fcnt; /* new position in fifo */
418 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
419 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
420
421 if (!(skb = dev_alloc_skb(fcnt)))
422 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
423 else {
424 ptr = skb_put(skb, fcnt);
425 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
426 maxlen = fcnt; /* complete transfer */
427 else
428 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
429
430 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
431 memcpy(ptr, ptr1, maxlen); /* copy data */
432 fcnt -= maxlen;
433
434 if (fcnt) { /* rest remaining */
435 ptr += maxlen;
436 ptr1 = bdata; /* start of buffer */
437 memcpy(ptr, ptr1, fcnt); /* rest */
438 }
439 skb_queue_tail(&bcs->rqueue, skb);
440 hfcpci_sched_event(bcs, B_RCVBUFREADY);
441 }
442
443 *z2r = new_z2; /* new position */
444 return (1);
445 } /* hfcpci_empty_fifo_trans */
446
447 /**********************************/
448 /* B-channel main receive routine */
449 /**********************************/
450 static void
451 main_rec_hfcpci(struct BCState *bcs)
452 {
453 struct IsdnCardState *cs = bcs->cs;
454 int rcnt, real_fifo;
455 int receive, count = 5;
456 struct sk_buff *skb;
457 bzfifo_type *bz;
458 u_char *bdata;
459 z_type *zp;
460
461
462 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
463 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
464 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
465 real_fifo = 1;
466 } else {
467 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
468 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
469 real_fifo = 0;
470 }
471 Begin:
472 count--;
473 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
474 debugl1(cs, "rec_data %d blocked", bcs->channel);
475 return;
476 }
477 if (bz->f1 != bz->f2) {
478 if (cs->debug & L1_DEB_HSCX)
479 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
480 bcs->channel, bz->f1, bz->f2);
481 zp = &bz->za[bz->f2];
482
483 rcnt = zp->z1 - zp->z2;
484 if (rcnt < 0)
485 rcnt += B_FIFO_SIZE;
486 rcnt++;
487 if (cs->debug & L1_DEB_HSCX)
488 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
489 bcs->channel, zp->z1, zp->z2, rcnt);
490 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
491 skb_queue_tail(&bcs->rqueue, skb);
492 hfcpci_sched_event(bcs, B_RCVBUFREADY);
493 }
494 rcnt = bz->f1 - bz->f2;
495 if (rcnt < 0)
496 rcnt += MAX_B_FRAMES + 1;
497 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
498 rcnt = 0;
499 hfcpci_clear_fifo_rx(cs, real_fifo);
500 }
501 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
502 if (rcnt > 1)
503 receive = 1;
504 else
505 receive = 0;
506 } else if (bcs->mode == L1_MODE_TRANS)
507 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
508 else
509 receive = 0;
510 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
511 if (count && receive)
512 goto Begin;
513 }
514
515 /**************************/
516 /* D-channel send routine */
517 /**************************/
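/*
 * Free space in the transmit ring is the usual ring-buffer expression
 * free = (Z2 - Z1 - 1) mod D_FIFO_SIZE, so one byte always stays unused
 * to tell a full FIFO apart from an empty one.
 */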
518 static void
519 hfcpci_fill_dfifo(struct IsdnCardState *cs)
520 {
521 int fcnt;
522 int count, new_z1, maxlen;
523 dfifo_type *df;
524 u_char *src, *dst, new_f1;
525
526 if (!cs->tx_skb)
527 return;
528 if (cs->tx_skb->len <= 0)
529 return;
530
531 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
532
533 if (cs->debug & L1_DEB_ISAC)
534 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
535 df->f1, df->f2,
536 df->za[df->f1 & D_FREG_MASK].z1);
537 fcnt = df->f1 - df->f2; /* frame count actually buffered */
538 if (fcnt < 0)
539 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
540 if (fcnt > (MAX_D_FRAMES - 1)) {
541 if (cs->debug & L1_DEB_ISAC)
542 debugl1(cs, "hfcpci_fill_Dfifo more than 14 frames");
543 #ifdef ERROR_STATISTIC
544 cs->err_tx++;
545 #endif
546 return;
547 }
548 /* now determine free bytes in FIFO buffer */
549 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
550 if (count <= 0)
551 count += D_FIFO_SIZE; /* count now contains available bytes */
552
553 if (cs->debug & L1_DEB_ISAC)
554 debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
555 cs->tx_skb->len, count);
556 if (count < cs->tx_skb->len) {
557 if (cs->debug & L1_DEB_ISAC)
558 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
559 return;
560 }
561 count = cs->tx_skb->len; /* get frame len */
562 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
563 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
564 src = cs->tx_skb->data; /* source pointer */
565 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
566 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
567 if (maxlen > count)
568 maxlen = count; /* limit size */
569 memcpy(dst, src, maxlen); /* first copy */
570
571 count -= maxlen; /* remaining bytes */
572 if (count) {
573 dst = df->data; /* start of buffer */
574 src += maxlen; /* new position */
575 memcpy(dst, src, count);
576 }
577 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
578 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
579 df->f1 = new_f1; /* next frame */
580
581 dev_kfree_skb_any(cs->tx_skb);
582 cs->tx_skb = NULL;
583 }
584
585 /**************************/
586 /* B-channel send routine */
587 /**************************/
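/*
 * Two cases are handled below: transparent mode streams queued skbs into
 * the FIFO until about two receive thresholds worth of data are buffered,
 * while HDLC mode copies exactly one frame and closes it by advancing F1.
 */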
588 static void
589 hfcpci_fill_fifo(struct BCState *bcs)
590 {
591 struct IsdnCardState *cs = bcs->cs;
592 int maxlen, fcnt;
593 int count, new_z1;
594 bzfifo_type *bz;
595 u_char *bdata;
596 u_char new_f1, *src, *dst;
597 unsigned short *z1t, *z2t;
598
599 if (!bcs->tx_skb)
600 return;
601 if (bcs->tx_skb->len <= 0)
602 return;
603
604 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
605 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
606 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
607 } else {
608 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
609 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
610 }
611
612 if (bcs->mode == L1_MODE_TRANS) {
613 z1t = &bz->za[MAX_B_FRAMES].z1;
614 z2t = z1t + 1;
615 if (cs->debug & L1_DEB_HSCX)
616 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
617 bcs->channel, *z1t, *z2t);
618 fcnt = *z2t - *z1t;
619 if (fcnt <= 0)
620 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
621 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
622
623 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
624 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
625 /* data is suitable for fifo */
626 count = bcs->tx_skb->len;
627
628 new_z1 = *z1t + count; /* new buffer Position */
629 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
630 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
631 src = bcs->tx_skb->data; /* source pointer */
632 dst = bdata + (*z1t - B_SUB_VAL);
633 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
634 if (maxlen > count)
635 maxlen = count; /* limit size */
636 memcpy(dst, src, maxlen); /* first copy */
637
638 count -= maxlen; /* remaining bytes */
639 if (count) {
640 dst = bdata; /* start of buffer */
641 src += maxlen; /* new position */
642 memcpy(dst, src, count);
643 }
644 bcs->tx_cnt -= bcs->tx_skb->len;
645 fcnt += bcs->tx_skb->len;
646 *z1t = new_z1; /* now send data */
647 } else if (cs->debug & L1_DEB_HSCX)
648 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
649 bcs->channel, bcs->tx_skb->len);
650
651 if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
652 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
653 u_long flags;
654 spin_lock_irqsave(&bcs->aclock, flags);
655 bcs->ackcnt += bcs->tx_skb->len;
656 spin_unlock_irqrestore(&bcs->aclock, flags);
657 schedule_event(bcs, B_ACKPENDING);
658 }
659
660 dev_kfree_skb_any(bcs->tx_skb);
661 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
662 }
663 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
664 return;
665 }
666 if (cs->debug & L1_DEB_HSCX)
667 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
668 bcs->channel, bz->f1, bz->f2,
669 bz->za[bz->f1].z1);
670
671 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
672 if (fcnt < 0)
673 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
674 if (fcnt > (MAX_B_FRAMES - 1)) {
675 if (cs->debug & L1_DEB_HSCX)
676 debugl1(cs, "hfcpci_fill_Bfifo more than 14 frames");
677 return;
678 }
679 /* now determine free bytes in FIFO buffer */
680 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
681 if (count <= 0)
682 count += B_FIFO_SIZE; /* count now contains available bytes */
683
684 if (cs->debug & L1_DEB_HSCX)
685 debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
686 bcs->channel, bcs->tx_skb->len,
687 count, current->state);
688
689 if (count < bcs->tx_skb->len) {
690 if (cs->debug & L1_DEB_HSCX)
691 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
692 return;
693 }
694 count = bcs->tx_skb->len; /* get frame len */
695 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
696 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
697 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
698
699 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
700 src = bcs->tx_skb->data; /* source pointer */
701 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
702 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
703 if (maxlen > count)
704 maxlen = count; /* limit size */
705 memcpy(dst, src, maxlen); /* first copy */
706
707 count -= maxlen; /* remaining bytes */
708 if (count) {
709 dst = bdata; /* start of buffer */
710 src += maxlen; /* new position */
711 memcpy(dst, src, count);
712 }
713 bcs->tx_cnt -= bcs->tx_skb->len;
714 if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
715 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
716 u_long flags;
717 spin_lock_irqsave(&bcs->aclock, flags);
718 bcs->ackcnt += bcs->tx_skb->len;
719 spin_unlock_irqrestore(&bcs->aclock, flags);
720 schedule_event(bcs, B_ACKPENDING);
721 }
722
723 bz->za[new_f1].z1 = new_z1; /* for next buffer */
724 bz->f1 = new_f1; /* next frame */
725
726 dev_kfree_skb_any(bcs->tx_skb);
727 bcs->tx_skb = NULL;
728 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
729 }
730
731 /**********************************************/
732 /* D-channel l1 state call for leased NT-mode */
733 /**********************************************/
734 static void
735 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
736 {
737 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
738
739 switch (pr) {
740 case (PH_DATA | REQUEST):
741 case (PH_PULL | REQUEST):
742 case (PH_PULL | INDICATION):
743 st->l1.l1hw(st, pr, arg);
744 break;
745 case (PH_ACTIVATE | REQUEST):
746 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
747 break;
748 case (PH_TESTLOOP | REQUEST):
749 if (1 & (long) arg)
750 debugl1(cs, "PH_TEST_LOOP B1");
751 if (2 & (long) arg)
752 debugl1(cs, "PH_TEST_LOOP B2");
753 if (!(3 & (long) arg))
754 debugl1(cs, "PH_TEST_LOOP DISABLED");
755 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
756 break;
757 default:
758 if (cs->debug)
759 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
760 break;
761 }
762 }
763
764
765
766 /***********************/
767 /* set/reset echo mode */
768 /***********************/
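/*
 * ic->arg == 98 switches the card into NT mode (only allowed while no
 * B-channel interrupts are enabled); ic->arg == 12 toggles logging of
 * D-channel echo data through the otherwise unused B2 receive path.
 */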
769 static int
770 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic)
771 {
772 u_long flags;
773 int i = *(unsigned int *) ic->parm.num;
774
775 if ((ic->arg == 98) &&
776 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
777 spin_lock_irqsave(&cs->lock, flags);
778 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
779 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
780 udelay(10);
781 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
782 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
783 udelay(10);
784 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
785 udelay(10);
786 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
787 cs->dc.hfcpci.ph_state = 1;
788 cs->hw.hfcpci.nt_mode = 1;
789 cs->hw.hfcpci.nt_timer = 0;
790 cs->stlist->l2.l2l1 = dch_nt_l2l1;
791 spin_unlock_irqrestore(&cs->lock, flags);
792 debugl1(cs, "NT mode activated");
793 return (0);
794 }
795 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
796 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
797 return (-EINVAL);
798
799 spin_lock_irqsave(&cs->lock, flags);
800 if (i) {
801 cs->logecho = 1;
802 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
803 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
804 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
805 } else {
806 cs->logecho = 0;
807 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
808 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
809 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
810 }
811 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
812 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
813 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
814 cs->hw.hfcpci.ctmt &= ~2;
815 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
816 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
817 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
818 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
819 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
820 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
821 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
822 spin_unlock_irqrestore(&cs->lock, flags);
823 return (0);
824 } /* hfcpci_auxcmd */
825
826 /*****************************/
827 /* E-channel receive routine */
828 /*****************************/
829 static void
830 receive_emsg(struct IsdnCardState *cs)
831 {
832 int rcnt;
833 int receive, count = 5;
834 bzfifo_type *bz;
835 u_char *bdata;
836 z_type *zp;
837 u_char *ptr, *ptr1, new_f2;
838 int total, maxlen, new_z2;
839 u_char e_buffer[256];
840
841 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
842 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
843 Begin:
844 count--;
845 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
846 debugl1(cs, "echo_rec_data blocked");
847 return;
848 }
849 if (bz->f1 != bz->f2) {
850 if (cs->debug & L1_DEB_ISAC)
851 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
852 bz->f1, bz->f2);
853 zp = &bz->za[bz->f2];
854
855 rcnt = zp->z1 - zp->z2;
856 if (rcnt < 0)
857 rcnt += B_FIFO_SIZE;
858 rcnt++;
859 if (cs->debug & L1_DEB_ISAC)
860 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
861 zp->z1, zp->z2, rcnt);
862 new_z2 = zp->z2 + rcnt; /* new position in fifo */
863 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
864 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
865 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
866 if ((rcnt > 256 + 3) || (rcnt < 4) ||
867 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
868 if (cs->debug & L1_DEB_WARN)
869 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
870 bz->za[new_f2].z2 = new_z2;
871 bz->f2 = new_f2; /* next buffer */
872 } else {
873 total = rcnt;
874 rcnt -= 3;
875 ptr = e_buffer;
876
877 if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
878 maxlen = rcnt; /* complete transfer */
879 else
880 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
881
882 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
883 memcpy(ptr, ptr1, maxlen); /* copy data */
884 rcnt -= maxlen;
885
886 if (rcnt) { /* rest remaining */
887 ptr += maxlen;
888 ptr1 = bdata; /* start of buffer */
889 memcpy(ptr, ptr1, rcnt); /* rest */
890 }
891 bz->za[new_f2].z2 = new_z2;
892 bz->f2 = new_f2; /* next buffer */
893 if (cs->debug & DEB_DLOG_HEX) {
894 ptr = cs->dlog;
895 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
896 *ptr++ = 'E';
897 *ptr++ = 'C';
898 *ptr++ = 'H';
899 *ptr++ = 'O';
900 *ptr++ = ':';
901 ptr += QuickHex(ptr, e_buffer, total - 3);
902 ptr--;
903 *ptr++ = '\n';
904 *ptr = 0;
905 HiSax_putstatus(cs, NULL, cs->dlog);
906 } else
907 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
908 }
909 }
910
911 rcnt = bz->f1 - bz->f2;
912 if (rcnt < 0)
913 rcnt += MAX_B_FRAMES + 1;
914 if (rcnt > 1)
915 receive = 1;
916 else
917 receive = 0;
918 } else
919 receive = 0;
920 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
921 if (count && receive)
922 goto Begin;
923 } /* receive_emsg */
924
925 /*********************/
926 /* Interrupt handler */
927 /*********************/
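/*
 * INT_S1 bits handled below: 0x40 state machine, 0x80 timer, 0x20 D
 * receive, 0x04 D transmit done, 0x08/0x10 B receive and 0x01/0x02 B
 * transmit done (subject to the bswapped channel exchange).  While the
 * FIFOs are locked via FLG_LOCK_ATOMIC, pending bits are parked in
 * hw.hfcpci.int_s1 and processed on a later pass.
 */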
928 static irqreturn_t
929 hfcpci_interrupt(int intno, void *dev_id)
930 {
931 u_long flags;
932 struct IsdnCardState *cs = dev_id;
933 u_char exval;
934 struct BCState *bcs;
935 int count = 15;
936 u_char val, stat;
937
938 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
939 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
940 return IRQ_NONE; /* not initialised */
941 }
942 spin_lock_irqsave(&cs->lock, flags);
943 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
944 val = Read_hfc(cs, HFCPCI_INT_S1);
945 if (cs->debug & L1_DEB_ISAC)
946 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
947 } else {
948 spin_unlock_irqrestore(&cs->lock, flags);
949 return IRQ_NONE;
950 }
951 if (cs->debug & L1_DEB_ISAC)
952 debugl1(cs, "HFC-PCI irq %x %s", val,
953 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
954 "locked" : "unlocked");
955 val &= cs->hw.hfcpci.int_m1;
956 if (val & 0x40) { /* state machine irq */
957 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
958 if (cs->debug & L1_DEB_ISAC)
959 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
960 exval);
961 cs->dc.hfcpci.ph_state = exval;
962 sched_event_D_pci(cs, D_L1STATECHANGE);
963 val &= ~0x40;
964 }
965 if (val & 0x80) { /* timer irq */
966 if (cs->hw.hfcpci.nt_mode) {
967 if ((--cs->hw.hfcpci.nt_timer) < 0)
968 sched_event_D_pci(cs, D_L1STATECHANGE);
969 }
970 val &= ~0x80;
971 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
972 }
973 while (val) {
974 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
975 cs->hw.hfcpci.int_s1 |= val;
976 spin_unlock_irqrestore(&cs->lock, flags);
977 return IRQ_HANDLED;
978 }
979 if (cs->hw.hfcpci.int_s1 & 0x18) {
980 exval = val;
981 val = cs->hw.hfcpci.int_s1;
982 cs->hw.hfcpci.int_s1 = exval;
983 }
984 if (val & 0x08) {
985 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
986 if (cs->debug)
987 debugl1(cs, "hfcpci spurious 0x08 IRQ");
988 } else
989 main_rec_hfcpci(bcs);
990 }
991 if (val & 0x10) {
992 if (cs->logecho)
993 receive_emsg(cs);
994 else if (!(bcs = Sel_BCS(cs, 1))) {
995 if (cs->debug)
996 debugl1(cs, "hfcpci spurious 0x10 IRQ");
997 } else
998 main_rec_hfcpci(bcs);
999 }
1000 if (val & 0x01) {
1001 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
1002 if (cs->debug)
1003 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1004 } else {
1005 if (bcs->tx_skb) {
1006 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1007 hfcpci_fill_fifo(bcs);
1008 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1009 } else
1010 debugl1(cs, "fill_data %d blocked", bcs->channel);
1011 } else {
1012 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1013 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1014 hfcpci_fill_fifo(bcs);
1015 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1016 } else
1017 debugl1(cs, "fill_data %d blocked", bcs->channel);
1018 } else {
1019 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1020 }
1021 }
1022 }
1023 }
1024 if (val & 0x02) {
1025 if (!(bcs = Sel_BCS(cs, 1))) {
1026 if (cs->debug)
1027 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1028 } else {
1029 if (bcs->tx_skb) {
1030 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1031 hfcpci_fill_fifo(bcs);
1032 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1033 } else
1034 debugl1(cs, "fill_data %d blocked", bcs->channel);
1035 } else {
1036 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1037 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1038 hfcpci_fill_fifo(bcs);
1039 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1040 } else
1041 debugl1(cs, "fill_data %d blocked", bcs->channel);
1042 } else {
1043 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1044 }
1045 }
1046 }
1047 }
1048 if (val & 0x20) { /* receive dframe */
1049 receive_dmsg(cs);
1050 }
1051 if (val & 0x04) { /* dframe transmitted */
1052 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1053 del_timer(&cs->dbusytimer);
1054 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1055 sched_event_D_pci(cs, D_CLEARBUSY);
1056 if (cs->tx_skb) {
1057 if (cs->tx_skb->len) {
1058 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1059 hfcpci_fill_dfifo(cs);
1060 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1061 } else {
1062 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1063 }
1064 goto afterXPR;
1065 } else {
1066 dev_kfree_skb_irq(cs->tx_skb);
1067 cs->tx_cnt = 0;
1068 cs->tx_skb = NULL;
1069 }
1070 }
1071 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1072 cs->tx_cnt = 0;
1073 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1074 hfcpci_fill_dfifo(cs);
1075 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1076 } else {
1077 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1078 }
1079 } else
1080 sched_event_D_pci(cs, D_XMTBUFREADY);
1081 }
1082 afterXPR:
1083 if (cs->hw.hfcpci.int_s1 && count--) {
1084 val = cs->hw.hfcpci.int_s1;
1085 cs->hw.hfcpci.int_s1 = 0;
1086 if (cs->debug & L1_DEB_ISAC)
1087 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1088 } else
1089 val = 0;
1090 }
1091 spin_unlock_irqrestore(&cs->lock, flags);
1092 return IRQ_HANDLED;
1093 }
1094
1095 /********************************************************************/
1096 /* timer callback for D-chan busy resolution. Currently no function */
1097 /********************************************************************/
1098 static void
1099 hfcpci_dbusy_timer(struct timer_list *t)
1100 {
1101 }
1102
1103 /*************************************/
1104 /* Layer 1 D-channel hardware access */
1105 /*************************************/
1106 static void
1107 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1108 {
1109 u_long flags;
1110 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1111 struct sk_buff *skb = arg;
1112
1113 switch (pr) {
1114 case (PH_DATA | REQUEST):
1115 if (cs->debug & DEB_DLOG_HEX)
1116 LogFrame(cs, skb->data, skb->len);
1117 if (cs->debug & DEB_DLOG_VERBOSE)
1118 dlogframe(cs, skb, 0);
1119 spin_lock_irqsave(&cs->lock, flags);
1120 if (cs->tx_skb) {
1121 skb_queue_tail(&cs->sq, skb);
1122 #ifdef L2FRAME_DEBUG /* psa */
1123 if (cs->debug & L1_DEB_LAPD)
1124 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1125 #endif
1126 } else {
1127 cs->tx_skb = skb;
1128 cs->tx_cnt = 0;
1129 #ifdef L2FRAME_DEBUG /* psa */
1130 if (cs->debug & L1_DEB_LAPD)
1131 Logl2Frame(cs, skb, "PH_DATA", 0);
1132 #endif
1133 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1134 hfcpci_fill_dfifo(cs);
1135 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1136 } else
1137 debugl1(cs, "hfcpci_fill_dfifo blocked");
1138
1139 }
1140 spin_unlock_irqrestore(&cs->lock, flags);
1141 break;
1142 case (PH_PULL | INDICATION):
1143 spin_lock_irqsave(&cs->lock, flags);
1144 if (cs->tx_skb) {
1145 if (cs->debug & L1_DEB_WARN)
1146 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
1147 skb_queue_tail(&cs->sq, skb);
1148 spin_unlock_irqrestore(&cs->lock, flags);
1149 break;
1150 }
1151 if (cs->debug & DEB_DLOG_HEX)
1152 LogFrame(cs, skb->data, skb->len);
1153 if (cs->debug & DEB_DLOG_VERBOSE)
1154 dlogframe(cs, skb, 0);
1155 cs->tx_skb = skb;
1156 cs->tx_cnt = 0;
1157 #ifdef L2FRAME_DEBUG /* psa */
1158 if (cs->debug & L1_DEB_LAPD)
1159 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1160 #endif
1161 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1162 hfcpci_fill_dfifo(cs);
1163 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1164 } else
1165 debugl1(cs, "hfcpci_fill_dfifo blocked");
1166 spin_unlock_irqrestore(&cs->lock, flags);
1167 break;
1168 case (PH_PULL | REQUEST):
1169 #ifdef L2FRAME_DEBUG /* psa */
1170 if (cs->debug & L1_DEB_LAPD)
1171 debugl1(cs, "-> PH_REQUEST_PULL");
1172 #endif
1173 spin_lock_irqsave(&cs->lock, flags);
1174 if (!cs->tx_skb) {
1175 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1176 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1177 } else
1178 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1179 spin_unlock_irqrestore(&cs->lock, flags);
1180 break;
1181 case (HW_RESET | REQUEST):
1182 spin_lock_irqsave(&cs->lock, flags);
1183 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1184 udelay(6);
1185 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 3 */
1186 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1187 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1188 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1189 spin_unlock_irqrestore(&cs->lock, flags);
1190 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1191 break;
1192 case (HW_ENABLE | REQUEST):
1193 spin_lock_irqsave(&cs->lock, flags);
1194 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1195 spin_unlock_irqrestore(&cs->lock, flags);
1196 break;
1197 case (HW_DEACTIVATE | REQUEST):
1198 spin_lock_irqsave(&cs->lock, flags);
1199 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1200 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1201 spin_unlock_irqrestore(&cs->lock, flags);
1202 break;
1203 case (HW_INFO3 | REQUEST):
1204 spin_lock_irqsave(&cs->lock, flags);
1205 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1206 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1207 spin_unlock_irqrestore(&cs->lock, flags);
1208 break;
1209 case (HW_TESTLOOP | REQUEST):
1210 spin_lock_irqsave(&cs->lock, flags);
1211 switch ((long) arg) {
1212 case (1):
1213 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1214 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1215 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1216 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1217 break;
1218
1219 case (2):
1220 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1221 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1222 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1223 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1224 break;
1225
1226 default:
1227 spin_unlock_irqrestore(&cs->lock, flags);
1228 if (cs->debug & L1_DEB_WARN)
1229 debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
1230 return;
1231 }
1232 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1233 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1234 spin_unlock_irqrestore(&cs->lock, flags);
1235 break;
1236 default:
1237 if (cs->debug & L1_DEB_WARN)
1238 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1239 break;
1240 }
1241 }
1242
1243 /***********************************************/
1244 /* called during init setting l1 stack pointer */
1245 /***********************************************/
1246 static void
1247 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1248 {
1249 st->l1.l1hw = HFCPCI_l1hw;
1250 }
1251
1252 /**************************************/
1253 /* send B-channel data if not blocked */
1254 /**************************************/
1255 static void
1256 hfcpci_send_data(struct BCState *bcs)
1257 {
1258 struct IsdnCardState *cs = bcs->cs;
1259
1260 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1261 hfcpci_fill_fifo(bcs);
1262 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1263 } else
1264 debugl1(cs, "send_data %d blocked", bcs->channel);
1265 }
1266
1267 /***************************************************************/
1268 /* activate/deactivate hardware for selected channels and mode */
1269 /***************************************************************/
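/*
 * With chanlimit == 1 only FIFO 0 is used: a request for B-channel 2 is
 * mapped onto FIFO 0 (fifo2 = 0) and the S/T channels are exchanged
 * instead via bswapped and the 0x80 bit in SCTRL_E.
 */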
1270 static void
1271 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1272 {
1273 struct IsdnCardState *cs = bcs->cs;
1274 int fifo2;
1275
1276 if (cs->debug & L1_DEB_HSCX)
1277 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1278 mode, bc, bcs->channel);
1279 bcs->mode = mode;
1280 bcs->channel = bc;
1281 fifo2 = bc;
1282 if (cs->chanlimit > 1) {
1283 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1284 cs->hw.hfcpci.sctrl_e &= ~0x80;
1285 } else {
1286 if (bc) {
1287 if (mode != L1_MODE_NULL) {
1288 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1289 cs->hw.hfcpci.sctrl_e |= 0x80;
1290 } else {
1291 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1292 cs->hw.hfcpci.sctrl_e &= ~0x80;
1293 }
1294 fifo2 = 0;
1295 } else {
1296 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1297 cs->hw.hfcpci.sctrl_e &= ~0x80;
1298 }
1299 }
1300 switch (mode) {
1301 case (L1_MODE_NULL):
1302 if (bc) {
1303 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1304 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1305 } else {
1306 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1307 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1308 }
1309 if (fifo2) {
1310 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1311 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1312 } else {
1313 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1314 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1315 }
1316 break;
1317 case (L1_MODE_TRANS):
1318 hfcpci_clear_fifo_rx(cs, fifo2);
1319 hfcpci_clear_fifo_tx(cs, fifo2);
1320 if (bc) {
1321 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1322 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1323 } else {
1324 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1325 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1326 }
1327 if (fifo2) {
1328 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1329 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1330 cs->hw.hfcpci.ctmt |= 2;
1331 cs->hw.hfcpci.conn &= ~0x18;
1332 } else {
1333 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1334 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1335 cs->hw.hfcpci.ctmt |= 1;
1336 cs->hw.hfcpci.conn &= ~0x03;
1337 }
1338 break;
1339 case (L1_MODE_HDLC):
1340 hfcpci_clear_fifo_rx(cs, fifo2);
1341 hfcpci_clear_fifo_tx(cs, fifo2);
1342 if (bc) {
1343 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1344 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1345 } else {
1346 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1347 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1348 }
1349 if (fifo2) {
1350 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1351 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1352 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1353 cs->hw.hfcpci.ctmt &= ~2;
1354 cs->hw.hfcpci.conn &= ~0x18;
1355 } else {
1356 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1357 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1358 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1359 cs->hw.hfcpci.ctmt &= ~1;
1360 cs->hw.hfcpci.conn &= ~0x03;
1361 }
1362 break;
1363 case (L1_MODE_EXTRN):
1364 if (bc) {
1365 cs->hw.hfcpci.conn |= 0x10;
1366 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1367 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1368 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1369 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1370 } else {
1371 cs->hw.hfcpci.conn |= 0x02;
1372 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1373 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1374 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1375 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1376 }
1377 break;
1378 }
1379 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1380 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1381 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1382 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1383 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1384 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1385 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1386 }
1387
1388 /******************************/
1389 /* Layer2 -> Layer 1 Transfer */
1390 /******************************/
1391 static void
1392 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1393 {
1394 struct BCState *bcs = st->l1.bcs;
1395 u_long flags;
1396 struct sk_buff *skb = arg;
1397
1398 switch (pr) {
1399 case (PH_DATA | REQUEST):
1400 spin_lock_irqsave(&bcs->cs->lock, flags);
1401 if (bcs->tx_skb) {
1402 skb_queue_tail(&bcs->squeue, skb);
1403 } else {
1404 bcs->tx_skb = skb;
1405 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1406 bcs->cs->BC_Send_Data(bcs);
1407 }
1408 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1409 break;
1410 case (PH_PULL | INDICATION):
1411 spin_lock_irqsave(&bcs->cs->lock, flags);
1412 if (bcs->tx_skb) {
1413 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1414 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1415 break;
1416 }
1417 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1418 bcs->tx_skb = skb;
1419 bcs->cs->BC_Send_Data(bcs);
1420 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1421 break;
1422 case (PH_PULL | REQUEST):
1423 if (!bcs->tx_skb) {
1424 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1425 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1426 } else
1427 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1428 break;
1429 case (PH_ACTIVATE | REQUEST):
1430 spin_lock_irqsave(&bcs->cs->lock, flags);
1431 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1432 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1433 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1434 l1_msg_b(st, pr, arg);
1435 break;
1436 case (PH_DEACTIVATE | REQUEST):
1437 l1_msg_b(st, pr, arg);
1438 break;
1439 case (PH_DEACTIVATE | CONFIRM):
1440 spin_lock_irqsave(&bcs->cs->lock, flags);
1441 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1442 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1443 mode_hfcpci(bcs, 0, st->l1.bc);
1444 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1445 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1446 break;
1447 }
1448 }
1449
1450 /******************************************/
1451 /* deactivate B-channel access and queues */
1452 /******************************************/
1453 static void
1454 close_hfcpci(struct BCState *bcs)
1455 {
1456 mode_hfcpci(bcs, 0, bcs->channel);
1457 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1458 skb_queue_purge(&bcs->rqueue);
1459 skb_queue_purge(&bcs->squeue);
1460 if (bcs->tx_skb) {
1461 dev_kfree_skb_any(bcs->tx_skb);
1462 bcs->tx_skb = NULL;
1463 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1464 }
1465 }
1466 }
1467
1468 /*************************************/
1469 /* init B-channel queues and control */
1470 /*************************************/
1471 static int
1472 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1473 {
1474 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1475 skb_queue_head_init(&bcs->rqueue);
1476 skb_queue_head_init(&bcs->squeue);
1477 }
1478 bcs->tx_skb = NULL;
1479 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1480 bcs->event = 0;
1481 bcs->tx_cnt = 0;
1482 return (0);
1483 }
1484
1485 /*********************************/
1486 /* inits the stack for B-channel */
1487 /*********************************/
1488 static int
1489 setstack_2b(struct PStack *st, struct BCState *bcs)
1490 {
1491 bcs->channel = st->l1.bc;
1492 if (open_hfcpcistate(st->l1.hardware, bcs))
1493 return (-1);
1494 st->l1.bcs = bcs;
1495 st->l2.l2l1 = hfcpci_l2l1;
1496 setstack_manager(st);
1497 bcs->st = st;
1498 setstack_l1_B(st);
1499 return (0);
1500 }
1501
1502 /***************************/
1503 /* handle L1 state changes */
1504 /***************************/
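/*
 * In TE mode the ph_state values below map onto the S/T F-states
 * (3 = deactivated, 6/7 = INFO2/INFO4 received); in NT mode state 2 (G2)
 * starts the T1 supervision that is driven by nt_timer.
 */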
1505 static void
1506 hfcpci_bh(struct work_struct *work)
1507 {
1508 struct IsdnCardState *cs =
1509 container_of(work, struct IsdnCardState, tqueue);
1510 u_long flags;
1511 // struct PStack *stptr;
1512
1513 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1514 if (!cs->hw.hfcpci.nt_mode)
1515 switch (cs->dc.hfcpci.ph_state) {
1516 case (0):
1517 l1_msg(cs, HW_RESET | INDICATION, NULL);
1518 break;
1519 case (3):
1520 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1521 break;
1522 case (8):
1523 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1524 break;
1525 case (6):
1526 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1527 break;
1528 case (7):
1529 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1530 break;
1531 default:
1532 break;
1533 } else {
1534 spin_lock_irqsave(&cs->lock, flags);
1535 switch (cs->dc.hfcpci.ph_state) {
1536 case (2):
1537 if (cs->hw.hfcpci.nt_timer < 0) {
1538 cs->hw.hfcpci.nt_timer = 0;
1539 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1540 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1541 /* Clear already pending ints */
1542 if (Read_hfc(cs, HFCPCI_INT_S1));
1543 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1544 udelay(10);
1545 Write_hfc(cs, HFCPCI_STATES, 4);
1546 cs->dc.hfcpci.ph_state = 4;
1547 } else {
1548 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1549 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1550 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1551 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1552 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1553 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1554 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1555 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1556 }
1557 break;
1558 case (1):
1559 case (3):
1560 case (4):
1561 cs->hw.hfcpci.nt_timer = 0;
1562 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1563 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1564 break;
1565 default:
1566 break;
1567 }
1568 spin_unlock_irqrestore(&cs->lock, flags);
1569 }
1570 }
1571 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1572 DChannel_proc_rcv(cs);
1573 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1574 DChannel_proc_xmt(cs);
1575 }
1576
1577
1578 /********************************/
1579 /* called for card init message */
1580 /********************************/
1581 static void
1582 inithfcpci(struct IsdnCardState *cs)
1583 {
1584 cs->bcs[0].BC_SetStack = setstack_2b;
1585 cs->bcs[1].BC_SetStack = setstack_2b;
1586 cs->bcs[0].BC_Close = close_hfcpci;
1587 cs->bcs[1].BC_Close = close_hfcpci;
1588 timer_setup(&cs->dbusytimer, hfcpci_dbusy_timer, 0);
1589 mode_hfcpci(cs->bcs, 0, 0);
1590 mode_hfcpci(cs->bcs + 1, 0, 1);
1591 }
1592
1593
1594
1595 /*******************************************/
1596 /* handle card messages from control layer */
1597 /*******************************************/
1598 static int
1599 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1600 {
1601 u_long flags;
1602
1603 if (cs->debug & L1_DEB_ISAC)
1604 debugl1(cs, "HFCPCI: card_msg %x", mt);
1605 switch (mt) {
1606 case CARD_RESET:
1607 spin_lock_irqsave(&cs->lock, flags);
1608 reset_hfcpci(cs);
1609 spin_unlock_irqrestore(&cs->lock, flags);
1610 return (0);
1611 case CARD_RELEASE:
1612 release_io_hfcpci(cs);
1613 return (0);
1614 case CARD_INIT:
1615 spin_lock_irqsave(&cs->lock, flags);
1616 inithfcpci(cs);
1617 reset_hfcpci(cs);
1618 spin_unlock_irqrestore(&cs->lock, flags);
1619 msleep(80); /* Timeout 80ms */
1620 /* now switch timer interrupt off */
1621 spin_lock_irqsave(&cs->lock, flags);
1622 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1623 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1624 /* reinit mode reg */
1625 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1626 spin_unlock_irqrestore(&cs->lock, flags);
1627 return (0);
1628 case CARD_TEST:
1629 return (0);
1630 }
1631 return (0);
1632 }
1633
1634
1635 /* this pointer remembers the last HFC PCI device found, so that more than one card can be handled */
1636 static struct pci_dev *dev_hfcpci = NULL;
1637
1638 int
1639 setup_hfcpci(struct IsdnCard *card)
1640 {
1641 u_long flags;
1642 struct IsdnCardState *cs = card->cs;
1643 char tmp[64];
1644 int i;
1645 struct pci_dev *tmp_hfcpci = NULL;
1646
1647 strcpy(tmp, hfcpci_revision);
1648 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1649
1650 cs->hw.hfcpci.int_s1 = 0;
1651 cs->dc.hfcpci.ph_state = 0;
1652 cs->hw.hfcpci.fifo = 255;
1653 if (cs->typ != ISDN_CTYPE_HFC_PCI)
1654 return (0);
1655
1656 i = 0;
1657 while (id_list[i].vendor_id) {
1658 tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
1659 id_list[i].device_id,
1660 dev_hfcpci);
1661 i++;
1662 if (tmp_hfcpci) {
1663 dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
1664 if (pci_enable_device(tmp_hfcpci))
1665 continue;
1666 if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
1667 printk(KERN_WARNING
1668 "HiSax hfc_pci: No suitable DMA available.\n");
1669 continue;
1670 }
1671 if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
1672 printk(KERN_WARNING
1673 "HiSax hfc_pci: No suitable consistent DMA available.\n");
1674 continue;
1675 }
1676 pci_set_master(tmp_hfcpci);
1677 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))
1678 continue;
1679 else
1680 break;
1681 }
1682 }
1683
1684 if (!tmp_hfcpci) {
1685 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1686 return (0);
1687 }
1688
1689 i--;
1690 dev_hfcpci = tmp_hfcpci; /* old device */
1691 cs->hw.hfcpci.dev = dev_hfcpci;
1692 cs->irq = dev_hfcpci->irq;
1693 if (!cs->irq) {
1694 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1695 return (0);
1696 }
1697 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1698 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1699
1700 if (!cs->hw.hfcpci.pci_io) {
1701 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1702 return (0);
1703 }
1704
1705 /* Allocate memory for FIFOS */
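/*
 * The shared FIFO area is 32 KB and must start on a 32 KB boundary,
 * presumably because the chip only evaluates the upper address bits of
 * the base written to PCI config offset 0x80 below; hence the DMA mask
 * of ~0x7fff and the alignment check that follows.
 */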
1706 cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
1707 0x8000, &cs->hw.hfcpci.dma);
1708 if (!cs->hw.hfcpci.fifos) {
1709 printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
1710 return 0;
1711 }
1712 if (cs->hw.hfcpci.dma & 0x7fff) {
1713 printk(KERN_WARNING
1714 "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
1715 (u_long)cs->hw.hfcpci.dma);
1716 pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
1717 cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
1718 return 0;
1719 }
1720 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
1721 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1722 printk(KERN_INFO
1723 "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
1724 cs->hw.hfcpci.pci_io,
1725 cs->hw.hfcpci.fifos,
1726 (u_long)cs->hw.hfcpci.dma,
1727 cs->irq, HZ);
1728
1729 spin_lock_irqsave(&cs->lock, flags);
1730
1731 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1732 cs->hw.hfcpci.int_m2 = 0; /* disable all interrupts */
1733 cs->hw.hfcpci.int_m1 = 0;
1734 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1735 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1736 /* At this point the needed PCI config is done */
1737 /* fifos are still not enabled */
1738
1739 INIT_WORK(&cs->tqueue, hfcpci_bh);
1740 cs->setstack_d = setstack_hfcpci;
1741 cs->BC_Send_Data = &hfcpci_send_data;
1742 cs->readisac = NULL;
1743 cs->writeisac = NULL;
1744 cs->readisacfifo = NULL;
1745 cs->writeisacfifo = NULL;
1746 cs->BC_Read_Reg = NULL;
1747 cs->BC_Write_Reg = NULL;
1748 cs->irq_func = &hfcpci_interrupt;
1749 cs->irq_flags |= IRQF_SHARED;
1750 timer_setup(&cs->hw.hfcpci.timer, hfcpci_Timer, 0);
1751 cs->cardmsg = &hfcpci_card_msg;
1752 cs->auxcmd = &hfcpci_auxcmd;
1753
1754 spin_unlock_irqrestore(&cs->lock, flags);
1755
1756 return (1);
1757 }
1758