1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * hfcpci.c low level driver for CCD's hfc-pci based cards
5 *
6 * Author Werner Cornelius (werner@isdn4linux.de)
7 * based on existing driver for CCD hfc ISA cards
8 * type approval valid for HFC-S PCI A based card
9 *
10 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
11 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
12 *
13 * Module options:
14 *
15 * debug:
16 * NOTE: only one debug value must be given for all cards
17 * See hfc_pci.h for debug flags.
18 *
19 * poll:
20 * NOTE: only one poll value must be given for all cards
21 * Give the number of samples for each fifo process.
22 * By default 128 is used. Decrease to reduce delay, increase to
23 * reduce cpu load. If unsure, don't mess with it!
24 * A value of 128 will use the controller's interrupt. Other values will
25 * use the kernel timer, because the controller will not allow lower values
26 * than 128.
27 * Also note that the value depends on the kernel timer frequency.
28 * If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
29 * If the kernel uses 100 Hz, steps of 80 samples are possible.
30 * If the kernel uses 300 Hz, steps of about 26 samples are possible.
31 */
32
33 #include <linux/interrupt.h>
34 #include <linux/module.h>
35 #include <linux/pci.h>
36 #include <linux/delay.h>
37 #include <linux/mISDNhw.h>
38 #include <linux/slab.h>
39
40 #include "hfc_pci.h"
41
42 static const char *hfcpci_revision = "2.0";
43
44 static int HFC_cnt;
45 static uint debug;
46 static uint poll, tics;
47 static struct timer_list hfc_tl;
48 static unsigned long hfc_jiffies;
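
/*
 * Illustrative sketch, not used by the driver: the module-options comment at
 * the top notes that poll values other than 128 are served by a kernel timer,
 * so the usable step size depends on HZ (the S/T interface delivers 8000
 * samples per second).  A hypothetical helper showing that arithmetic:
 */
static inline uint hfcpci_poll_to_tics(uint samples)
{
	/* one kernel tick covers 8000 / HZ samples */
	return (samples * HZ) / 8000;
}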
49
50 MODULE_AUTHOR("Karsten Keil");
51 MODULE_LICENSE("GPL");
52 module_param(debug, uint, S_IRUGO | S_IWUSR);
53 module_param(poll, uint, S_IRUGO | S_IWUSR);
54
55 enum {
56 HFC_CCD_2BD0,
57 HFC_CCD_B000,
58 HFC_CCD_B006,
59 HFC_CCD_B007,
60 HFC_CCD_B008,
61 HFC_CCD_B009,
62 HFC_CCD_B00A,
63 HFC_CCD_B00B,
64 HFC_CCD_B00C,
65 HFC_CCD_B100,
66 HFC_CCD_B700,
67 HFC_CCD_B701,
68 HFC_ASUS_0675,
69 HFC_BERKOM_A1T,
70 HFC_BERKOM_TCONCEPT,
71 HFC_ANIGMA_MC145575,
72 HFC_ZOLTRIX_2BD0,
73 HFC_DIGI_DF_M_IOM2_E,
74 HFC_DIGI_DF_M_E,
75 HFC_DIGI_DF_M_IOM2_A,
76 HFC_DIGI_DF_M_A,
77 HFC_ABOCOM_2BD1,
78 HFC_SITECOM_DC105V2,
79 };
80
81 struct hfcPCI_hw {
82 unsigned char cirm;
83 unsigned char ctmt;
84 unsigned char clkdel;
85 unsigned char states;
86 unsigned char conn;
87 unsigned char mst_m;
88 unsigned char int_m1;
89 unsigned char int_m2;
90 unsigned char sctrl;
91 unsigned char sctrl_r;
92 unsigned char sctrl_e;
93 unsigned char trm;
94 unsigned char fifo_en;
95 unsigned char bswapped;
96 unsigned char protocol;
97 int nt_timer;
98 unsigned char __iomem *pci_io; /* start of PCI IO memory */
99 dma_addr_t dmahandle;
100 void *fifos; /* FIFO memory */
101 int last_bfifo_cnt[2];
102 /* marker saving last b-fifo frame count */
103 struct timer_list timer;
104 };
105
106 #define HFC_CFG_MASTER 1
107 #define HFC_CFG_SLAVE 2
108 #define HFC_CFG_PCM 3
109 #define HFC_CFG_2HFC 4
110 #define HFC_CFG_SLAVEHFC 5
111 #define HFC_CFG_NEG_F0 6
112 #define HFC_CFG_SW_DD_DU 7
113
114 #define FLG_HFC_TIMER_T1 16
115 #define FLG_HFC_TIMER_T3 17
116
117 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
118 #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
119 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
120 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
121
122
123 struct hfc_pci {
124 u_char subtype;
125 u_char chanlimit;
126 u_char initdone;
127 u_long cfg;
128 u_int irq;
129 u_int irqcnt;
130 struct pci_dev *pdev;
131 struct hfcPCI_hw hw;
132 spinlock_t lock; /* card lock */
133 struct dchannel dch;
134 struct bchannel bch[2];
135 };
136
137 /* Interface functions */
138 static void
139 enable_hwirq(struct hfc_pci *hc)
140 {
141 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
142 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
143 }
144
145 static void
146 disable_hwirq(struct hfc_pci *hc)
147 {
148 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
149 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
150 }
151
152 /*
153 * free hardware resources used by driver
154 */
155 static void
156 release_io_hfcpci(struct hfc_pci *hc)
157 {
158 /* disable memory mapped ports + busmaster */
159 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
160 del_timer(&hc->hw.timer);
161 dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
162 hc->hw.dmahandle);
163 iounmap(hc->hw.pci_io);
164 }
165
166 /*
167 * set mode (NT or TE)
168 */
169 static void
170 hfcpci_setmode(struct hfc_pci *hc)
171 {
172 if (hc->hw.protocol == ISDN_P_NT_S0) {
173 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
174 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
175 hc->hw.states = 1; /* G1 */
176 } else {
177 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
178 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
179 hc->hw.states = 2; /* F2 */
180 }
181 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
182 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
183 udelay(10);
184 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
185 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
186 }
187
188 /*
189 * function called to reset the HFC PCI chip. A complete software reset of chip
190 * and fifos is done.
191 */
192 static void
193 reset_hfcpci(struct hfc_pci *hc)
194 {
195 u_char val;
196 int cnt = 0;
197
198 printk(KERN_DEBUG "reset_hfcpci: entered\n");
199 val = Read_hfc(hc, HFCPCI_CHIP_ID);
200 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
201 /* enable memory mapped ports, disable busmaster */
202 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
203 disable_hwirq(hc);
204 /* enable memory ports + busmaster */
205 pci_write_config_word(hc->pdev, PCI_COMMAND,
206 PCI_ENA_MEMIO + PCI_ENA_MASTER);
207 val = Read_hfc(hc, HFCPCI_STATUS);
208 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
209 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
210 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
211 set_current_state(TASK_UNINTERRUPTIBLE);
212 mdelay(10); /* Timeout 10ms */
213 hc->hw.cirm = 0; /* Reset Off */
214 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
215 val = Read_hfc(hc, HFCPCI_STATUS);
216 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
217 while (cnt < 50000) { /* max 50000 us */
218 udelay(5);
219 cnt += 5;
220 val = Read_hfc(hc, HFCPCI_STATUS);
221 if (!(val & 2))
222 break;
223 }
224 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
225
226 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
227
228 hc->hw.bswapped = 0; /* no exchange */
229 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
230 hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
231 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
232 hc->hw.sctrl_r = 0;
233 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
234 hc->hw.mst_m = 0;
235 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
236 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
237 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
238 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
239 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
240 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
241 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
242 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
243
244 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
245 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
246 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
247
248 /* Clear already pending ints */
249 val = Read_hfc(hc, HFCPCI_INT_S1);
250
251 /* set NT/TE mode */
252 hfcpci_setmode(hc);
253
254 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
255 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
256
257 /*
258 * Init GCI/IOM2 in master mode
259 * Slots 0 and 1 are set for B-chan 1 and 2
260 * D- and monitor/CI channel are not enabled
261 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
262 * STIO2 is used as data input, B1+B2 from IOM->ST
263 * ST B-channel send disabled -> continuous 1s
264 * The IOM slots are always enabled
265 */
266 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
267 /* set data flow directions: connect B1,B2: HFC to/from PCM */
268 hc->hw.conn = 0x09;
269 } else {
270 hc->hw.conn = 0x36; /* set data flow directions */
271 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
272 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
273 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
274 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
275 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
276 } else {
277 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
278 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
279 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
280 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
281 }
282 }
283 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
284 val = Read_hfc(hc, HFCPCI_INT_S2);
285 }
286
287 /*
288 * Timer function called when kernel timer expires
289 */
290 static void
291 hfcpci_Timer(struct timer_list *t)
292 {
293 struct hfc_pci *hc = from_timer(hc, t, hw.timer);
294 hc->hw.timer.expires = jiffies + 75;
295 /* WD RESET */
296 /*
297 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
298 * add_timer(&hc->hw.timer);
299 */
300 }
301
302
303 /*
304 * select the B-channel entry that matches the given channel mask and is active
305 */
306 static struct bchannel *
307 Sel_BCS(struct hfc_pci *hc, int channel)
308 {
309 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
310 (hc->bch[0].nr & channel))
311 return &hc->bch[0];
312 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
313 (hc->bch[1].nr & channel))
314 return &hc->bch[1];
315 else
316 return NULL;
317 }
318
319 /*
320 * clear the desired B-channel rx fifo
321 */
322 static void
323 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
324 {
325 u_char fifo_state;
326 struct bzfifo *bzr;
327
328 if (fifo) {
329 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
330 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
331 } else {
332 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
333 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
334 }
335 if (fifo_state)
336 hc->hw.fifo_en ^= fifo_state;
337 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
338 hc->hw.last_bfifo_cnt[fifo] = 0;
339 bzr->f1 = MAX_B_FRAMES;
340 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
341 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
342 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
343 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
344 if (fifo_state)
345 hc->hw.fifo_en |= fifo_state;
346 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
347 }
348
349 /*
350 * clear the desired B-channel tx fifo
351 */
352 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
353 {
354 u_char fifo_state;
355 struct bzfifo *bzt;
356
357 if (fifo) {
358 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
359 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
360 } else {
361 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
362 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
363 }
364 if (fifo_state)
365 hc->hw.fifo_en ^= fifo_state;
366 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
367 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
368 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
369 "z1(%x) z2(%x) state(%x)\n",
370 fifo, bzt->f1, bzt->f2,
371 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
372 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
373 fifo_state);
374 bzt->f2 = MAX_B_FRAMES;
375 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
376 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
377 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
378 if (fifo_state)
379 hc->hw.fifo_en |= fifo_state;
380 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
381 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
382 printk(KERN_DEBUG
383 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
384 fifo, bzt->f1, bzt->f2,
385 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
386 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
387 }
388
389 /*
390 * read a complete B-frame out of the buffer
391 */
392 static void
393 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
394 u_char *bdata, int count)
395 {
396 u_char *ptr, *ptr1, new_f2;
397 int maxlen, new_z2;
398 struct zt *zp;
399
400 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
401 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
402 zp = &bz->za[bz->f2]; /* point to Z-Regs */
403 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
404 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
405 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
406 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
407 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
408 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
409 if (bch->debug & DEBUG_HW)
410 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
411 "invalid length %d or crc\n", count);
412 #ifdef ERROR_STATISTIC
413 bch->err_inv++;
414 #endif
415 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
416 bz->f2 = new_f2; /* next buffer */
417 } else {
418 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
419 if (!bch->rx_skb) {
420 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
421 return;
422 }
423 count -= 3;
424 ptr = skb_put(bch->rx_skb, count);
425
426 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
427 maxlen = count; /* complete transfer */
428 else
429 maxlen = B_FIFO_SIZE + B_SUB_VAL -
430 le16_to_cpu(zp->z2); /* maximum */
431
432 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
433 /* start of data */
434 memcpy(ptr, ptr1, maxlen); /* copy data */
435 count -= maxlen;
436
437 if (count) { /* rest remaining */
438 ptr += maxlen;
439 ptr1 = bdata; /* start of buffer */
440 memcpy(ptr, ptr1, count); /* rest */
441 }
442 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
443 bz->f2 = new_f2; /* next buffer */
444 recv_Bchannel(bch, MISDN_ID_ANY, false);
445 }
446 }
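
/*
 * The frame copy above (and the D-channel reader below) follows the usual
 * two-step ring-buffer read: copy up to the physical end of the FIFO memory,
 * then wrap to its start for the remainder.  A minimal, driver-independent
 * sketch of that pattern (hypothetical helper, shown only for clarity):
 */
static inline void ring_copy_out(u_char *dst, const u_char *ring,
				 unsigned int size, unsigned int tail,
				 unsigned int len)
{
	unsigned int first = (len < size - tail) ? len : (size - tail);

	memcpy(dst, ring + tail, first);		/* up to end of FIFO */
	if (len > first)
		memcpy(dst + first, ring, len - first);	/* wrapped remainder */
}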
447
448 /*
449 * D-channel receive procedure
450 */
451 static int
452 receive_dmsg(struct hfc_pci *hc)
453 {
454 struct dchannel *dch = &hc->dch;
455 int maxlen;
456 int rcnt, total;
457 int count = 5;
458 u_char *ptr, *ptr1;
459 struct dfifo *df;
460 struct zt *zp;
461
462 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
463 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
464 zp = &df->za[df->f2 & D_FREG_MASK];
465 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
466 if (rcnt < 0)
467 rcnt += D_FIFO_SIZE;
468 rcnt++;
469 if (dch->debug & DEBUG_HW_DCHANNEL)
470 printk(KERN_DEBUG
471 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
472 df->f1, df->f2,
473 le16_to_cpu(zp->z1),
474 le16_to_cpu(zp->z2),
475 rcnt);
476
477 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
478 (df->data[le16_to_cpu(zp->z1)])) {
479 if (dch->debug & DEBUG_HW)
480 printk(KERN_DEBUG
481 "empty_fifo hfcpci packet inv. len "
482 "%d or crc %d\n",
483 rcnt,
484 df->data[le16_to_cpu(zp->z1)]);
485 #ifdef ERROR_STATISTIC
486 dch->err_rx++;
487 #endif
488 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
489 (MAX_D_FRAMES + 1); /* next buffer */
490 df->za[df->f2 & D_FREG_MASK].z2 =
491 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
492 (D_FIFO_SIZE - 1));
493 } else {
494 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
495 if (!dch->rx_skb) {
496 printk(KERN_WARNING
497 "HFC-PCI: D receive out of memory\n");
498 break;
499 }
500 total = rcnt;
501 rcnt -= 3;
502 ptr = skb_put(dch->rx_skb, rcnt);
503
504 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
505 maxlen = rcnt; /* complete transfer */
506 else
507 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
508 /* maximum */
509
510 ptr1 = df->data + le16_to_cpu(zp->z2);
511 /* start of data */
512 memcpy(ptr, ptr1, maxlen); /* copy data */
513 rcnt -= maxlen;
514
515 if (rcnt) { /* rest remaining */
516 ptr += maxlen;
517 ptr1 = df->data; /* start of buffer */
518 memcpy(ptr, ptr1, rcnt); /* rest */
519 }
520 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
521 (MAX_D_FRAMES + 1); /* next buffer */
522 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
523 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
524 recv_Dchannel(dch);
525 }
526 }
527 return 1;
528 }
529
530 /*
531 * check for transparent receive data and read at most one 'poll' worth if available
532 */
533 static void
534 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
535 struct bzfifo *txbz, u_char *bdata)
536 {
537 __le16 *z1r, *z2r, *z1t, *z2t;
538 int new_z2, fcnt_rx, fcnt_tx, maxlen;
539 u_char *ptr, *ptr1;
540
541 z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
542 z2r = z1r + 1;
543 z1t = &txbz->za[MAX_B_FRAMES].z1;
544 z2t = z1t + 1;
545
546 fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
547 if (!fcnt_rx)
548 return; /* no data avail */
549
550 if (fcnt_rx <= 0)
551 fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
552 new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
553 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
554 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
555
556 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
557 if (fcnt_tx <= 0)
558 fcnt_tx += B_FIFO_SIZE;
559 /* fcnt_tx contains available bytes in tx-fifo */
560 fcnt_tx = B_FIFO_SIZE - fcnt_tx;
561 /* remaining bytes to send (bytes in tx-fifo) */
562
563 if (test_bit(FLG_RX_OFF, &bch->Flags)) {
564 bch->dropcnt += fcnt_rx;
565 *z2r = cpu_to_le16(new_z2);
566 return;
567 }
568 maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
569 if (maxlen < 0) {
570 pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
571 } else {
572 ptr = skb_put(bch->rx_skb, fcnt_rx);
573 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
574 maxlen = fcnt_rx; /* complete transfer */
575 else
576 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
577 /* maximum */
578
579 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
580 /* start of data */
581 memcpy(ptr, ptr1, maxlen); /* copy data */
582 fcnt_rx -= maxlen;
583
584 if (fcnt_rx) { /* rest remaining */
585 ptr += maxlen;
586 ptr1 = bdata; /* start of buffer */
587 memcpy(ptr, ptr1, fcnt_rx); /* rest */
588 }
589 recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
590 }
591 *z2r = cpu_to_le16(new_z2); /* new position */
592 }
593
594 /*
595 * B-channel main receive routine
596 */
597 static void
598 main_rec_hfcpci(struct bchannel *bch)
599 {
600 struct hfc_pci *hc = bch->hw;
601 int rcnt, real_fifo;
602 int receive = 0, count = 5;
603 struct bzfifo *txbz, *rxbz;
604 u_char *bdata;
605 struct zt *zp;
606
607 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
608 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
609 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
610 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
611 real_fifo = 1;
612 } else {
613 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
614 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
615 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
616 real_fifo = 0;
617 }
618 Begin:
619 count--;
620 if (rxbz->f1 != rxbz->f2) {
621 if (bch->debug & DEBUG_HW_BCHANNEL)
622 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
623 bch->nr, rxbz->f1, rxbz->f2);
624 zp = &rxbz->za[rxbz->f2];
625
626 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
627 if (rcnt < 0)
628 rcnt += B_FIFO_SIZE;
629 rcnt++;
630 if (bch->debug & DEBUG_HW_BCHANNEL)
631 printk(KERN_DEBUG
632 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
633 bch->nr, le16_to_cpu(zp->z1),
634 le16_to_cpu(zp->z2), rcnt);
635 hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
636 rcnt = rxbz->f1 - rxbz->f2;
637 if (rcnt < 0)
638 rcnt += MAX_B_FRAMES + 1;
639 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
640 rcnt = 0;
641 hfcpci_clear_fifo_rx(hc, real_fifo);
642 }
643 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
644 if (rcnt > 1)
645 receive = 1;
646 else
647 receive = 0;
648 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
649 hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
650 return;
651 } else
652 receive = 0;
653 if (count && receive)
654 goto Begin;
655
656 }
657
658 /*
659 * D-channel send routine
660 */
661 static void
662 hfcpci_fill_dfifo(struct hfc_pci *hc)
663 {
664 struct dchannel *dch = &hc->dch;
665 int fcnt;
666 int count, new_z1, maxlen;
667 struct dfifo *df;
668 u_char *src, *dst, new_f1;
669
670 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
671 printk(KERN_DEBUG "%s\n", __func__);
672
673 if (!dch->tx_skb)
674 return;
675 count = dch->tx_skb->len - dch->tx_idx;
676 if (count <= 0)
677 return;
678 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
679
680 if (dch->debug & DEBUG_HW_DFIFO)
681 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
682 df->f1, df->f2,
683 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
684 fcnt = df->f1 - df->f2; /* frame count actually buffered */
685 if (fcnt < 0)
686 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
687 if (fcnt > (MAX_D_FRAMES - 1)) {
688 if (dch->debug & DEBUG_HW_DCHANNEL)
689 printk(KERN_DEBUG
690 "hfcpci_fill_Dfifo more than 14 frames\n");
691 #ifdef ERROR_STATISTIC
692 dch->err_tx++;
693 #endif
694 return;
695 }
696 /* now determine free bytes in FIFO buffer */
697 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
698 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
699 if (maxlen <= 0)
700 maxlen += D_FIFO_SIZE; /* maxlen now contains available bytes */
701
702 if (dch->debug & DEBUG_HW_DCHANNEL)
703 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
704 count, maxlen);
705 if (count > maxlen) {
706 if (dch->debug & DEBUG_HW_DCHANNEL)
707 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
708 return;
709 }
710 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
711 (D_FIFO_SIZE - 1);
712 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
713 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
714 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
715 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
716 /* end fifo */
717 if (maxlen > count)
718 maxlen = count; /* limit size */
719 memcpy(dst, src, maxlen); /* first copy */
720
721 count -= maxlen; /* remaining bytes */
722 if (count) {
723 dst = df->data; /* start of buffer */
724 src += maxlen; /* new position */
725 memcpy(dst, src, count);
726 }
727 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
728 /* for next buffer */
729 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
730 /* new pos actual buffer */
731 df->f1 = new_f1; /* next frame */
732 dch->tx_idx = dch->tx_skb->len;
733 }
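
/*
 * Free space in the transmit FIFO above is taken from the Z counters as
 * (read - write - 1), adding the FIFO size when the result is not positive.
 * A hypothetical restatement of that arithmetic, for clarity only:
 */
static inline int hfcpci_fifo_free(int z_read, int z_write, int size)
{
	int space = z_read - z_write - 1;

	return (space <= 0) ? space + size : space;
}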
734
735 /*
736 * B-channel send routine
737 */
738 static void
739 hfcpci_fill_fifo(struct bchannel *bch)
740 {
741 struct hfc_pci *hc = bch->hw;
742 int maxlen, fcnt;
743 int count, new_z1;
744 struct bzfifo *bz;
745 u_char *bdata;
746 u_char new_f1, *src, *dst;
747 __le16 *z1t, *z2t;
748
749 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
750 printk(KERN_DEBUG "%s\n", __func__);
751 if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
752 if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
753 !test_bit(FLG_TRANSPARENT, &bch->Flags))
754 return;
755 count = HFCPCI_FILLEMPTY;
756 } else {
757 count = bch->tx_skb->len - bch->tx_idx;
758 }
759 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
760 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
761 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
762 } else {
763 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
764 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
765 }
766
767 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
768 z1t = &bz->za[MAX_B_FRAMES].z1;
769 z2t = z1t + 1;
770 if (bch->debug & DEBUG_HW_BCHANNEL)
771 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
772 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
773 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
774 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
775 if (fcnt <= 0)
776 fcnt += B_FIFO_SIZE;
777 if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
778 /* fcnt contains available bytes in fifo */
779 if (count > fcnt)
780 count = fcnt;
781 new_z1 = le16_to_cpu(*z1t) + count;
782 /* new buffer Position */
783 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
784 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
785 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
786 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
787 /* end of fifo */
788 if (bch->debug & DEBUG_HW_BFIFO)
789 printk(KERN_DEBUG "hfcpci_FFt fillempty "
790 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
791 fcnt, maxlen, new_z1, dst);
792 if (maxlen > count)
793 maxlen = count; /* limit size */
794 memset(dst, bch->fill[0], maxlen); /* first copy */
795 count -= maxlen; /* remaining bytes */
796 if (count) {
797 dst = bdata; /* start of buffer */
798 memset(dst, bch->fill[0], count);
799 }
800 *z1t = cpu_to_le16(new_z1); /* now send data */
801 return;
802 }
803 /* fcnt contains available bytes in fifo */
804 fcnt = B_FIFO_SIZE - fcnt;
805 /* remaining bytes to send (bytes in fifo) */
806
807 next_t_frame:
808 count = bch->tx_skb->len - bch->tx_idx;
809 /* maximum fill shall be poll*2 */
810 if (count > (poll << 1) - fcnt)
811 count = (poll << 1) - fcnt;
812 if (count <= 0)
813 return;
814 /* data is suitable for fifo */
815 new_z1 = le16_to_cpu(*z1t) + count;
816 /* new buffer Position */
817 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
818 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
819 src = bch->tx_skb->data + bch->tx_idx;
820 /* source pointer */
821 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
822 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
823 /* end of fifo */
824 if (bch->debug & DEBUG_HW_BFIFO)
825 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
826 "maxl(%d) nz1(%x) dst(%p)\n",
827 fcnt, maxlen, new_z1, dst);
828 fcnt += count;
829 bch->tx_idx += count;
830 if (maxlen > count)
831 maxlen = count; /* limit size */
832 memcpy(dst, src, maxlen); /* first copy */
833 count -= maxlen; /* remaining bytes */
834 if (count) {
835 dst = bdata; /* start of buffer */
836 src += maxlen; /* new position */
837 memcpy(dst, src, count);
838 }
839 *z1t = cpu_to_le16(new_z1); /* now send data */
840 if (bch->tx_idx < bch->tx_skb->len)
841 return;
842 dev_kfree_skb(bch->tx_skb);
843 if (get_next_bframe(bch))
844 goto next_t_frame;
845 return;
846 }
847 if (bch->debug & DEBUG_HW_BCHANNEL)
848 printk(KERN_DEBUG
849 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
850 __func__, bch->nr, bz->f1, bz->f2,
851 bz->za[bz->f1].z1);
852 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
853 if (fcnt < 0)
854 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
855 if (fcnt > (MAX_B_FRAMES - 1)) {
856 if (bch->debug & DEBUG_HW_BCHANNEL)
857 printk(KERN_DEBUG
858 "hfcpci_fill_Bfifo more than 14 frames\n");
859 return;
860 }
861 /* now determine free bytes in FIFO buffer */
862 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
863 le16_to_cpu(bz->za[bz->f1].z1) - 1;
864 if (maxlen <= 0)
865 maxlen += B_FIFO_SIZE; /* maxlen now contains available bytes */
866
867 if (bch->debug & DEBUG_HW_BCHANNEL)
868 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
869 bch->nr, count, maxlen);
870
871 if (maxlen < count) {
872 if (bch->debug & DEBUG_HW_BCHANNEL)
873 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
874 return;
875 }
876 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
877 /* new buffer Position */
878 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
879 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
880
881 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
882 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
883 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
884 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
885 /* end fifo */
886 if (maxlen > count)
887 maxlen = count; /* limit size */
888 memcpy(dst, src, maxlen); /* first copy */
889
890 count -= maxlen; /* remaining bytes */
891 if (count) {
892 dst = bdata; /* start of buffer */
893 src += maxlen; /* new position */
894 memcpy(dst, src, count);
895 }
896 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
897 bz->f1 = new_f1; /* next frame */
898 dev_kfree_skb(bch->tx_skb);
899 get_next_bframe(bch);
900 }
901
902
903
904 /*
905 * handle L1 state changes TE
906 */
907
908 static void
909 ph_state_te(struct dchannel *dch)
910 {
911 if (dch->debug)
912 printk(KERN_DEBUG "%s: TE newstate %x\n",
913 __func__, dch->state);
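/*
 * The chip reports S/T TE-side F-states: 0 reset, F3 deactivated,
 * F5/F8 signal received but no valid framing, F6 synchronized (INFO2),
 * F7 activated (INFO4).
 */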
914 switch (dch->state) {
915 case 0:
916 l1_event(dch->l1, HW_RESET_IND);
917 break;
918 case 3:
919 l1_event(dch->l1, HW_DEACT_IND);
920 break;
921 case 5:
922 case 8:
923 l1_event(dch->l1, ANYSIGNAL);
924 break;
925 case 6:
926 l1_event(dch->l1, INFO2);
927 break;
928 case 7:
929 l1_event(dch->l1, INFO4_P8);
930 break;
931 }
932 }
933
934 /*
935 * handle L1 state changes NT
936 */
937
938 static void
939 handle_nt_timer3(struct dchannel *dch) {
940 struct hfc_pci *hc = dch->hw;
941
942 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
943 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
944 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
945 hc->hw.nt_timer = 0;
946 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
947 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
948 hc->hw.mst_m |= HFCPCI_MASTER;
949 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
950 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
951 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
952 }
953
954 static void
955 ph_state_nt(struct dchannel *dch)
956 {
957 struct hfc_pci *hc = dch->hw;
958
959 if (dch->debug)
960 printk(KERN_DEBUG "%s: NT newstate %x\n",
961 __func__, dch->state);
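/*
 * NT-side G-states: G1 deactivated, G2 pending activation (T1 supervision),
 * G3 activated, G4 pending deactivation.
 */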
962 switch (dch->state) {
963 case 2:
964 if (hc->hw.nt_timer < 0) {
965 hc->hw.nt_timer = 0;
966 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
967 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
968 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
969 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
970 /* Clear already pending ints */
971 (void) Read_hfc(hc, HFCPCI_INT_S1);
972 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
973 udelay(10);
974 Write_hfc(hc, HFCPCI_STATES, 4);
975 dch->state = 4;
976 } else if (hc->hw.nt_timer == 0) {
977 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
978 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
979 hc->hw.nt_timer = NT_T1_COUNT;
980 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
981 hc->hw.ctmt |= HFCPCI_TIM3_125;
982 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
983 HFCPCI_CLTIMER);
984 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
985 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
986 /* allow G2 -> G3 transition */
987 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
988 } else {
989 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
990 }
991 break;
992 case 1:
993 hc->hw.nt_timer = 0;
994 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
995 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
996 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
997 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
998 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
999 hc->hw.mst_m &= ~HFCPCI_MASTER;
1000 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1001 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1002 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1003 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1004 break;
1005 case 4:
1006 hc->hw.nt_timer = 0;
1007 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1008 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1009 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1010 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1011 break;
1012 case 3:
1013 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1014 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1015 &dch->Flags)) {
1016 handle_nt_timer3(dch);
1017 break;
1018 }
1019 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1020 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1021 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1022 hc->hw.nt_timer = NT_T3_COUNT;
1023 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1024 hc->hw.ctmt |= HFCPCI_TIM3_125;
1025 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1026 HFCPCI_CLTIMER);
1027 }
1028 break;
1029 }
1030 }
1031
1032 static void
1033 ph_state(struct dchannel *dch)
1034 {
1035 struct hfc_pci *hc = dch->hw;
1036
1037 if (hc->hw.protocol == ISDN_P_NT_S0) {
1038 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1039 hc->hw.nt_timer < 0)
1040 handle_nt_timer3(dch);
1041 else
1042 ph_state_nt(dch);
1043 } else
1044 ph_state_te(dch);
1045 }
1046
1047 /*
1048 * Layer 1 callback function
1049 */
1050 static int
1051 hfc_l1callback(struct dchannel *dch, u_int cmd)
1052 {
1053 struct hfc_pci *hc = dch->hw;
1054
1055 switch (cmd) {
1056 case INFO3_P8:
1057 case INFO3_P10:
1058 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1059 hc->hw.mst_m |= HFCPCI_MASTER;
1060 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1061 break;
1062 case HW_RESET_REQ:
1063 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1064 /* HFC ST 3 */
1065 udelay(6);
1066 Write_hfc(hc, HFCPCI_STATES, 3); /* release load, HFC ST 3 */
1067 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1068 hc->hw.mst_m |= HFCPCI_MASTER;
1069 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1070 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1071 HFCPCI_DO_ACTION);
1072 l1_event(dch->l1, HW_POWERUP_IND);
1073 break;
1074 case HW_DEACT_REQ:
1075 hc->hw.mst_m &= ~HFCPCI_MASTER;
1076 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1077 skb_queue_purge(&dch->squeue);
1078 if (dch->tx_skb) {
1079 dev_kfree_skb(dch->tx_skb);
1080 dch->tx_skb = NULL;
1081 }
1082 dch->tx_idx = 0;
1083 if (dch->rx_skb) {
1084 dev_kfree_skb(dch->rx_skb);
1085 dch->rx_skb = NULL;
1086 }
1087 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1088 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1089 del_timer(&dch->timer);
1090 break;
1091 case HW_POWERUP_REQ:
1092 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1093 break;
1094 case PH_ACTIVATE_IND:
1095 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1096 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1097 GFP_ATOMIC);
1098 break;
1099 case PH_DEACTIVATE_IND:
1100 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1101 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1102 GFP_ATOMIC);
1103 break;
1104 default:
1105 if (dch->debug & DEBUG_HW)
1106 printk(KERN_DEBUG "%s: unknown command %x\n",
1107 __func__, cmd);
1108 return -1;
1109 }
1110 return 0;
1111 }
1112
1113 /*
1114 * Interrupt handler
1115 */
1116 static inline void
1117 tx_birq(struct bchannel *bch)
1118 {
1119 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1120 hfcpci_fill_fifo(bch);
1121 else {
1122 dev_kfree_skb(bch->tx_skb);
1123 if (get_next_bframe(bch))
1124 hfcpci_fill_fifo(bch);
1125 }
1126 }
1127
1128 static inline void
1129 tx_dirq(struct dchannel *dch)
1130 {
1131 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1132 hfcpci_fill_dfifo(dch->hw);
1133 else {
1134 dev_kfree_skb(dch->tx_skb);
1135 if (get_next_dframe(dch))
1136 hfcpci_fill_dfifo(dch->hw);
1137 }
1138 }
1139
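/*
 * INT_S1 bits as decoded in hfcpci_int(): 0x01 B1 TX, 0x02 B2 TX, 0x04 D TX,
 * 0x08 B1 RX, 0x10 B2 RX, 0x20 D RX, 0x40 state machine change, 0x80 timer.
 * Only bits enabled in int_m1 are acted upon.
 */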
1140 static irqreturn_t
1141 hfcpci_int(int intno, void *dev_id)
1142 {
1143 struct hfc_pci *hc = dev_id;
1144 u_char exval;
1145 struct bchannel *bch;
1146 u_char val, stat;
1147
1148 spin_lock(&hc->lock);
1149 if (!(hc->hw.int_m2 & 0x08)) {
1150 spin_unlock(&hc->lock);
1151 return IRQ_NONE; /* not initialised */
1152 }
1153 stat = Read_hfc(hc, HFCPCI_STATUS);
1154 if (HFCPCI_ANYINT & stat) {
1155 val = Read_hfc(hc, HFCPCI_INT_S1);
1156 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1157 printk(KERN_DEBUG
1158 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1159 } else {
1160 /* shared */
1161 spin_unlock(&hc->lock);
1162 return IRQ_NONE;
1163 }
1164 hc->irqcnt++;
1165
1166 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1167 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1168 val &= hc->hw.int_m1;
1169 if (val & 0x40) { /* state machine irq */
1170 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1171 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1172 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1173 hc->dch.state, exval);
1174 hc->dch.state = exval;
1175 schedule_event(&hc->dch, FLG_PHCHANGE);
1176 val &= ~0x40;
1177 }
1178 if (val & 0x80) { /* timer irq */
1179 if (hc->hw.protocol == ISDN_P_NT_S0) {
1180 if ((--hc->hw.nt_timer) < 0)
1181 schedule_event(&hc->dch, FLG_PHCHANGE);
1182 }
1183 val &= ~0x80;
1184 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1185 }
1186 if (val & 0x08) { /* B1 rx */
1187 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1188 if (bch)
1189 main_rec_hfcpci(bch);
1190 else if (hc->dch.debug)
1191 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1192 }
1193 if (val & 0x10) { /* B2 rx */
1194 bch = Sel_BCS(hc, 2);
1195 if (bch)
1196 main_rec_hfcpci(bch);
1197 else if (hc->dch.debug)
1198 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1199 }
1200 if (val & 0x01) { /* B1 tx */
1201 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1202 if (bch)
1203 tx_birq(bch);
1204 else if (hc->dch.debug)
1205 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1206 }
1207 if (val & 0x02) { /* B2 tx */
1208 bch = Sel_BCS(hc, 2);
1209 if (bch)
1210 tx_birq(bch);
1211 else if (hc->dch.debug)
1212 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1213 }
1214 if (val & 0x20) /* D rx */
1215 receive_dmsg(hc);
1216 if (val & 0x04) { /* D tx */
1217 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1218 del_timer(&hc->dch.timer);
1219 tx_dirq(&hc->dch);
1220 }
1221 spin_unlock(&hc->lock);
1222 return IRQ_HANDLED;
1223 }
1224
1225 /*
1226 * timer callback for D-chan busy resolution. Currently a no-op.
1227 */
1228 static void
1229 hfcpci_dbusy_timer(struct timer_list *t)
1230 {
1231 }
1232
1233 /*
1234 * activate/deactivate hardware for selected channels and mode
1235 */
1236 static int
1237 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1238 {
1239 struct hfc_pci *hc = bch->hw;
1240 int fifo2;
1241 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1242
1243 if (bch->debug & DEBUG_HW_BCHANNEL)
1244 printk(KERN_DEBUG
1245 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1246 bch->state, protocol, bch->nr, bc);
1247
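/*
 * 'bc' is a packed channel id: bits 0-7 carry the B-channel mask, bits 8-15
 * the PCM receive slot, bits 16-23 the PCM transmit slot, and a non-zero
 * value in bits 24-31 marks PCM slot use (unpacked just below).
 */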
1248 fifo2 = bc;
1249 pcm_mode = (bc >> 24) & 0xff;
1250 if (pcm_mode) { /* PCM SLOT USE */
1251 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1252 printk(KERN_WARNING
1253 "%s: pcm channel id without HFC_CFG_PCM\n",
1254 __func__);
1255 rx_slot = (bc >> 8) & 0xff;
1256 tx_slot = (bc >> 16) & 0xff;
1257 bc = bc & 0xff;
1258 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1259 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1260 __func__);
1261 if (hc->chanlimit > 1) {
1262 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1263 hc->hw.sctrl_e &= ~0x80;
1264 } else {
1265 if (bc & 2) {
1266 if (protocol != ISDN_P_NONE) {
1267 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1268 hc->hw.sctrl_e |= 0x80;
1269 } else {
1270 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1271 hc->hw.sctrl_e &= ~0x80;
1272 }
1273 fifo2 = 1;
1274 } else {
1275 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1276 hc->hw.sctrl_e &= ~0x80;
1277 }
1278 }
1279 switch (protocol) {
1280 case (-1): /* used for init */
1281 bch->state = -1;
1282 bch->nr = bc;
1283 fallthrough;
1284 case (ISDN_P_NONE):
1285 if (bch->state == ISDN_P_NONE)
1286 return 0;
1287 if (bc & 2) {
1288 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1289 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1290 } else {
1291 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1292 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1293 }
1294 if (fifo2 & 2) {
1295 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1296 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
1297 HFCPCI_INTS_B2REC);
1298 } else {
1299 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1300 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
1301 HFCPCI_INTS_B1REC);
1302 }
1303 #ifdef REVERSE_BITORDER
1304 if (bch->nr & 2)
1305 hc->hw.cirm &= 0x7f;
1306 else
1307 hc->hw.cirm &= 0xbf;
1308 #endif
1309 bch->state = ISDN_P_NONE;
1310 bch->nr = bc;
1311 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1312 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1313 break;
1314 case (ISDN_P_B_RAW):
1315 bch->state = protocol;
1316 bch->nr = bc;
1317 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1318 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1319 if (bc & 2) {
1320 hc->hw.sctrl |= SCTRL_B2_ENA;
1321 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1322 #ifdef REVERSE_BITORDER
1323 hc->hw.cirm |= 0x80;
1324 #endif
1325 } else {
1326 hc->hw.sctrl |= SCTRL_B1_ENA;
1327 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1328 #ifdef REVERSE_BITORDER
1329 hc->hw.cirm |= 0x40;
1330 #endif
1331 }
1332 if (fifo2 & 2) {
1333 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1334 if (!tics)
1335 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1336 HFCPCI_INTS_B2REC);
1337 hc->hw.ctmt |= 2;
1338 hc->hw.conn &= ~0x18;
1339 } else {
1340 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1341 if (!tics)
1342 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1343 HFCPCI_INTS_B1REC);
1344 hc->hw.ctmt |= 1;
1345 hc->hw.conn &= ~0x03;
1346 }
1347 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1348 break;
1349 case (ISDN_P_B_HDLC):
1350 bch->state = protocol;
1351 bch->nr = bc;
1352 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1353 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1354 if (bc & 2) {
1355 hc->hw.sctrl |= SCTRL_B2_ENA;
1356 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1357 } else {
1358 hc->hw.sctrl |= SCTRL_B1_ENA;
1359 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1360 }
1361 if (fifo2 & 2) {
1362 hc->hw.last_bfifo_cnt[1] = 0;
1363 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1364 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1365 HFCPCI_INTS_B2REC);
1366 hc->hw.ctmt &= ~2;
1367 hc->hw.conn &= ~0x18;
1368 } else {
1369 hc->hw.last_bfifo_cnt[0] = 0;
1370 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1371 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1372 HFCPCI_INTS_B1REC);
1373 hc->hw.ctmt &= ~1;
1374 hc->hw.conn &= ~0x03;
1375 }
1376 test_and_set_bit(FLG_HDLC, &bch->Flags);
1377 break;
1378 default:
1379 printk(KERN_DEBUG "prot not known %x\n", protocol);
1380 return -ENOPROTOOPT;
1381 }
1382 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1383 if ((protocol == ISDN_P_NONE) ||
1384 (protocol == -1)) { /* init case */
1385 rx_slot = 0;
1386 tx_slot = 0;
1387 } else {
1388 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1389 rx_slot |= 0xC0;
1390 tx_slot |= 0xC0;
1391 } else {
1392 rx_slot |= 0x80;
1393 tx_slot |= 0x80;
1394 }
1395 }
1396 if (bc & 2) {
1397 hc->hw.conn &= 0xc7;
1398 hc->hw.conn |= 0x08;
1399 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1400 __func__, tx_slot);
1401 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1402 __func__, rx_slot);
1403 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1404 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1405 } else {
1406 hc->hw.conn &= 0xf8;
1407 hc->hw.conn |= 0x01;
1408 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1409 __func__, tx_slot);
1410 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1411 __func__, rx_slot);
1412 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1413 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1414 }
1415 }
1416 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1417 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1418 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1419 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1420 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1421 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1422 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1423 #ifdef REVERSE_BITORDER
1424 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1425 #endif
1426 return 0;
1427 }
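
/*
 * Illustrative only (hypothetical helper, not used by the driver): composing
 * a packed channel id with PCM slots for mode_hfcpci(), matching the field
 * layout unpacked at the top of that function.
 */
static inline int hfcpci_pack_pcm_chan(u_char bc, u_char rx_slot,
				       u_char tx_slot)
{
	return bc | (rx_slot << 8) | (tx_slot << 16) | (1 << 24);
}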
1428
1429 static int
1430 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1431 {
1432 struct hfc_pci *hc = bch->hw;
1433
1434 if (bch->debug & DEBUG_HW_BCHANNEL)
1435 printk(KERN_DEBUG
1436 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1437 bch->state, protocol, bch->nr, chan);
1438 if (bch->nr != chan) {
1439 printk(KERN_DEBUG
1440 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1441 bch->nr, chan);
1442 return -EINVAL;
1443 }
1444 switch (protocol) {
1445 case (ISDN_P_B_RAW):
1446 bch->state = protocol;
1447 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1448 if (chan & 2) {
1449 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1450 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1451 if (!tics)
1452 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1453 hc->hw.ctmt |= 2;
1454 hc->hw.conn &= ~0x18;
1455 #ifdef REVERSE_BITORDER
1456 hc->hw.cirm |= 0x80;
1457 #endif
1458 } else {
1459 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1460 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1461 if (!tics)
1462 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1463 hc->hw.ctmt |= 1;
1464 hc->hw.conn &= ~0x03;
1465 #ifdef REVERSE_BITORDER
1466 hc->hw.cirm |= 0x40;
1467 #endif
1468 }
1469 break;
1470 case (ISDN_P_B_HDLC):
1471 bch->state = protocol;
1472 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1473 if (chan & 2) {
1474 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1475 hc->hw.last_bfifo_cnt[1] = 0;
1476 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1477 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1478 hc->hw.ctmt &= ~2;
1479 hc->hw.conn &= ~0x18;
1480 } else {
1481 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1482 hc->hw.last_bfifo_cnt[0] = 0;
1483 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1484 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1485 hc->hw.ctmt &= ~1;
1486 hc->hw.conn &= ~0x03;
1487 }
1488 break;
1489 default:
1490 printk(KERN_DEBUG "prot not known %x\n", protocol);
1491 return -ENOPROTOOPT;
1492 }
1493 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1494 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1495 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1496 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1497 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1498 #ifdef REVERSE_BITORDER
1499 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1500 #endif
1501 return 0;
1502 }
1503
1504 static void
1505 deactivate_bchannel(struct bchannel *bch)
1506 {
1507 struct hfc_pci *hc = bch->hw;
1508 u_long flags;
1509
1510 spin_lock_irqsave(&hc->lock, flags);
1511 mISDN_clear_bchannel(bch);
1512 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1513 spin_unlock_irqrestore(&hc->lock, flags);
1514 }
1515
1516 /*
1517 * Layer 1 B-channel hardware access
1518 */
1519 static int
1520 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1521 {
1522 return mISDN_ctrl_bchannel(bch, cq);
1523 }
1524 static int
1525 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1526 {
1527 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1528 struct hfc_pci *hc = bch->hw;
1529 int ret = -EINVAL;
1530 u_long flags;
1531
1532 if (bch->debug & DEBUG_HW)
1533 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1534 switch (cmd) {
1535 case HW_TESTRX_RAW:
1536 spin_lock_irqsave(&hc->lock, flags);
1537 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1538 spin_unlock_irqrestore(&hc->lock, flags);
1539 break;
1540 case HW_TESTRX_HDLC:
1541 spin_lock_irqsave(&hc->lock, flags);
1542 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1543 spin_unlock_irqrestore(&hc->lock, flags);
1544 break;
1545 case HW_TESTRX_OFF:
1546 spin_lock_irqsave(&hc->lock, flags);
1547 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1548 spin_unlock_irqrestore(&hc->lock, flags);
1549 ret = 0;
1550 break;
1551 case CLOSE_CHANNEL:
1552 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1553 deactivate_bchannel(bch);
1554 ch->protocol = ISDN_P_NONE;
1555 ch->peer = NULL;
1556 module_put(THIS_MODULE);
1557 ret = 0;
1558 break;
1559 case CONTROL_CHANNEL:
1560 ret = channel_bctrl(bch, arg);
1561 break;
1562 default:
1563 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1564 __func__, cmd);
1565 }
1566 return ret;
1567 }
1568
1569 /*
1570 * Layer2 -> Layer 1 Dchannel data
1571 */
1572 static int
1573 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1574 {
1575 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1576 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1577 struct hfc_pci *hc = dch->hw;
1578 int ret = -EINVAL;
1579 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1580 unsigned int id;
1581 u_long flags;
1582
1583 switch (hh->prim) {
1584 case PH_DATA_REQ:
1585 spin_lock_irqsave(&hc->lock, flags);
1586 ret = dchannel_senddata(dch, skb);
1587 if (ret > 0) { /* direct TX */
1588 id = hh->id; /* skb can be freed */
1589 hfcpci_fill_dfifo(dch->hw);
1590 ret = 0;
1591 spin_unlock_irqrestore(&hc->lock, flags);
1592 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1593 } else
1594 spin_unlock_irqrestore(&hc->lock, flags);
1595 return ret;
1596 case PH_ACTIVATE_REQ:
1597 spin_lock_irqsave(&hc->lock, flags);
1598 if (hc->hw.protocol == ISDN_P_NT_S0) {
1599 ret = 0;
1600 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1601 hc->hw.mst_m |= HFCPCI_MASTER;
1602 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1603 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1604 spin_unlock_irqrestore(&hc->lock, flags);
1605 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1606 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1607 break;
1608 }
1609 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1610 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1611 HFCPCI_DO_ACTION | 1);
1612 } else
1613 ret = l1_event(dch->l1, hh->prim);
1614 spin_unlock_irqrestore(&hc->lock, flags);
1615 break;
1616 case PH_DEACTIVATE_REQ:
1617 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1618 spin_lock_irqsave(&hc->lock, flags);
1619 if (hc->hw.protocol == ISDN_P_NT_S0) {
1620 /* prepare deactivation */
1621 Write_hfc(hc, HFCPCI_STATES, 0x40);
1622 skb_queue_purge(&dch->squeue);
1623 if (dch->tx_skb) {
1624 dev_kfree_skb(dch->tx_skb);
1625 dch->tx_skb = NULL;
1626 }
1627 dch->tx_idx = 0;
1628 if (dch->rx_skb) {
1629 dev_kfree_skb(dch->rx_skb);
1630 dch->rx_skb = NULL;
1631 }
1632 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1633 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1634 del_timer(&dch->timer);
1635 #ifdef FIXME
1636 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1637 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1638 #endif
1639 hc->hw.mst_m &= ~HFCPCI_MASTER;
1640 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1641 ret = 0;
1642 } else {
1643 ret = l1_event(dch->l1, hh->prim);
1644 }
1645 spin_unlock_irqrestore(&hc->lock, flags);
1646 break;
1647 }
1648 if (!ret)
1649 dev_kfree_skb(skb);
1650 return ret;
1651 }
1652
1653 /*
1654 * Layer2 -> Layer 1 Bchannel data
1655 */
1656 static int
1657 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1658 {
1659 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1660 struct hfc_pci *hc = bch->hw;
1661 int ret = -EINVAL;
1662 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1663 unsigned long flags;
1664
1665 switch (hh->prim) {
1666 case PH_DATA_REQ:
1667 spin_lock_irqsave(&hc->lock, flags);
1668 ret = bchannel_senddata(bch, skb);
1669 if (ret > 0) { /* direct TX */
1670 hfcpci_fill_fifo(bch);
1671 ret = 0;
1672 }
1673 spin_unlock_irqrestore(&hc->lock, flags);
1674 return ret;
1675 case PH_ACTIVATE_REQ:
1676 spin_lock_irqsave(&hc->lock, flags);
1677 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1678 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1679 else
1680 ret = 0;
1681 spin_unlock_irqrestore(&hc->lock, flags);
1682 if (!ret)
1683 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1684 NULL, GFP_KERNEL);
1685 break;
1686 case PH_DEACTIVATE_REQ:
1687 deactivate_bchannel(bch);
1688 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1689 NULL, GFP_KERNEL);
1690 ret = 0;
1691 break;
1692 }
1693 if (!ret)
1694 dev_kfree_skb(skb);
1695 return ret;
1696 }
1697
1698 /*
1699 * called for card init message
1700 */
1701
1702 static void
1703 inithfcpci(struct hfc_pci *hc)
1704 {
1705 printk(KERN_DEBUG "inithfcpci: entered\n");
1706 timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
1707 hc->chanlimit = 2;
1708 mode_hfcpci(&hc->bch[0], 1, -1);
1709 mode_hfcpci(&hc->bch[1], 2, -1);
1710 }
1711
1712
1713 static int
1714 init_card(struct hfc_pci *hc)
1715 {
1716 int cnt = 3;
1717 u_long flags;
1718
1719 printk(KERN_DEBUG "init_card: entered\n");
1720
1721
1722 spin_lock_irqsave(&hc->lock, flags);
1723 disable_hwirq(hc);
1724 spin_unlock_irqrestore(&hc->lock, flags);
1725 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1726 printk(KERN_WARNING
1727 "mISDN: couldn't get interrupt %d\n", hc->irq);
1728 return -EIO;
1729 }
1730 spin_lock_irqsave(&hc->lock, flags);
1731 reset_hfcpci(hc);
1732 while (cnt) {
1733 inithfcpci(hc);
1734 /*
1735 * Finally enable IRQ output
1736 * this is only allowed, if an IRQ routine is already
1737 * established for this HFC, so don't do that earlier
1738 */
1739 enable_hwirq(hc);
1740 spin_unlock_irqrestore(&hc->lock, flags);
1741 /* Timeout 80ms */
1742 set_current_state(TASK_UNINTERRUPTIBLE);
1743 schedule_timeout((80 * HZ) / 1000);
1744 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1745 hc->irq, hc->irqcnt);
1746 /* now switch timer interrupt off */
1747 spin_lock_irqsave(&hc->lock, flags);
1748 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1749 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1750 /* reinit mode reg */
1751 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1752 if (!hc->irqcnt) {
1753 printk(KERN_WARNING
1754 "HFC PCI: IRQ(%d) getting no interrupts "
1755 "during init %d\n", hc->irq, 4 - cnt);
1756 if (cnt == 1)
1757 break;
1758 else {
1759 reset_hfcpci(hc);
1760 cnt--;
1761 }
1762 } else {
1763 spin_unlock_irqrestore(&hc->lock, flags);
1764 hc->initdone = 1;
1765 return 0;
1766 }
1767 }
1768 disable_hwirq(hc);
1769 spin_unlock_irqrestore(&hc->lock, flags);
1770 free_irq(hc->irq, hc);
1771 return -EIO;
1772 }
1773
1774 static int
1775 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1776 {
1777 int ret = 0;
1778 u_char slot;
1779
1780 switch (cq->op) {
1781 case MISDN_CTRL_GETOP:
1782 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1783 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1784 break;
1785 case MISDN_CTRL_LOOP:
1786 /* channel 0 disables the loop */
1787 if (cq->channel < 0 || cq->channel > 2) {
1788 ret = -EINVAL;
1789 break;
1790 }
1791 if (cq->channel & 1) {
1792 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1793 slot = 0xC0;
1794 else
1795 slot = 0x80;
1796 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1797 __func__, slot);
1798 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1799 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1800 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1801 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1802 }
1803 if (cq->channel & 2) {
1804 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1805 slot = 0xC1;
1806 else
1807 slot = 0x81;
1808 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1809 __func__, slot);
1810 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1811 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1812 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1813 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1814 }
1815 if (cq->channel & 3)
1816 hc->hw.trm |= 0x80; /* enable IOM-loop */
1817 else {
1818 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1819 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1820 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1821 }
1822 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1823 break;
1824 case MISDN_CTRL_CONNECT:
1825 if (cq->channel == cq->p1) {
1826 ret = -EINVAL;
1827 break;
1828 }
1829 if (cq->channel < 1 || cq->channel > 2 ||
1830 cq->p1 < 1 || cq->p1 > 2) {
1831 ret = -EINVAL;
1832 break;
1833 }
1834 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1835 slot = 0xC0;
1836 else
1837 slot = 0x80;
1838 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1839 __func__, slot);
1840 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1841 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1842 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1843 slot = 0xC1;
1844 else
1845 slot = 0x81;
1846 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1847 __func__, slot);
1848 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1849 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1850 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1851 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1852 hc->hw.trm |= 0x80;
1853 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1854 break;
1855 case MISDN_CTRL_DISCONNECT:
1856 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1857 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1858 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1859 break;
1860 case MISDN_CTRL_L1_TIMER3:
1861 ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1862 break;
1863 default:
1864 printk(KERN_WARNING "%s: unknown Op %x\n",
1865 __func__, cq->op);
1866 ret = -EINVAL;
1867 break;
1868 }
1869 return ret;
1870 }
1871
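/*
 * Open the D-channel.  ISDN_P_NONE and channel address 1 (the E-channel,
 * not implemented) are rejected.  On the first open the card is brought up
 * via init_card(); a later open with a different protocol (TE <-> NT)
 * tears down the old layer-1 instance if needed and reprograms the chip
 * through hfcpci_setmode().  If layer 1 is already activated (state 7 in
 * TE mode, state 3 in NT mode), a PH_ACTIVATE_IND is queued immediately so
 * the new user learns the current state.
 */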
static int
open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
	      struct channel_req *rq)
{
	int err = 0;

	if (debug & DEBUG_HW_OPEN)
		printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
		       hc->dch.dev.id, __builtin_return_address(0));
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	if (rq->adr.channel == 1) {
		/* TODO: E-Channel */
		return -EINVAL;
	}
	if (!hc->initdone) {
		if (rq->protocol == ISDN_P_TE_S0) {
			err = create_l1(&hc->dch, hfc_l1callback);
			if (err)
				return err;
		}
		hc->hw.protocol = rq->protocol;
		ch->protocol = rq->protocol;
		err = init_card(hc);
		if (err)
			return err;
	} else {
		if (rq->protocol != ch->protocol) {
			if (hc->hw.protocol == ISDN_P_TE_S0)
				l1_event(hc->dch.l1, CLOSE_CHANNEL);
			if (rq->protocol == ISDN_P_TE_S0) {
				err = create_l1(&hc->dch, hfc_l1callback);
				if (err)
					return err;
			}
			hc->hw.protocol = rq->protocol;
			ch->protocol = rq->protocol;
			hfcpci_setmode(hc);
		}
	}

	if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
	    ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
	}
	rq->ch = ch;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: cannot get module\n", __func__);
	return 0;
}

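/*
 * Open a B-channel.  rq->adr.channel selects B1 (1) or B2 (2); the
 * FLG_OPEN bit makes sure each B-channel is opened only once at a time.
 */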
static int
open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &hc->bch[rq->adr.channel - 1];
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch; /* TODO: E-channel */
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: cannot get module\n", __func__);
	return 0;
}

/*
 * device control function
 */
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct hfc_pci *hc = dch->hw;
	struct channel_req *rq;
	int err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n",
		       __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if ((rq->protocol == ISDN_P_TE_S0) ||
		    (rq->protocol == ISDN_P_NT_S0))
			err = open_dchannel(hc, ch, rq);
		else
			err = open_bchannel(hc, rq);
		break;
	case CLOSE_CHANNEL:
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
			       __func__, hc->dch.dev.id,
			       __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(hc, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: unknown command %x\n",
			       __func__, cmd);
		return -EINVAL;
	}
	return err;
}

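/*
 * Set up the PCI resources of one card.  The controller fetches its FIFO
 * data via busmaster DMA from a single 32 KiB block that (as the comment
 * below notes) must be 32 KiB aligned and located below 4 GiB.  A
 * dma_alloc_coherent() of 0x8000 bytes should already return a block
 * aligned to its size, and the unusual DMA mask 0xFFFF8000 is presumably
 * meant to express the same alignment/4 GiB constraint to the DMA layer.
 * The bus address of the block is then programmed into the chip's PCI
 * config register 0x80.
 */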
static int
setup_hw(struct hfc_pci *hc)
{
	void *buffer;

	printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
	hc->hw.cirm = 0;
	hc->dch.state = 0;
	pci_set_master(hc->pdev);
	if (!hc->irq) {
		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
		return 1;
	}
	hc->hw.pci_io =
		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;

	if (!hc->hw.pci_io) {
		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
		return 1;
	}
	/* Allocate memory for FIFOS */
	/* the memory needs to be on a 32k boundary within the first 4G */
	dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
	buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
				    GFP_KERNEL);
	/* We silently assume the address is okay if nonzero */
	if (!buffer) {
		printk(KERN_WARNING
		       "HFC-PCI: Error allocating memory for FIFO!\n");
		return 1;
	}
	hc->hw.fifos = buffer;
	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
	if (unlikely(!hc->hw.pci_io)) {
		printk(KERN_WARNING
		       "HFC-PCI: Error in ioremap for PCI!\n");
		dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
				  hc->hw.dmahandle);
		return 1;
	}

	printk(KERN_INFO
	       "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
	       (u_long) hc->hw.pci_io, hc->hw.fifos,
	       &hc->hw.dmahandle, hc->irq, HZ);

	/* enable memory mapped ports, disable busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
	hc->hw.int_m2 = 0;
	disable_hwirq(hc);
	hc->hw.int_m1 = 0;
	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
	/* At this point the needed PCI config is done */
	/* fifos are still not enabled */
	timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
	/* default PCM master */
	test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
	return 0;
}

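/*
 * Tear a card down in roughly the reverse order of setup: disable the
 * hardware interrupts under the lock, shut down both B-channels and the
 * D-channel timer, close the TE layer-1 instance if one was created, and
 * only then free the IRQ and release the I/O mapping and FIFO memory
 * (the ordering constraint is noted at the release_io_hfcpci() call).
 */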
static void
release_card(struct hfc_pci *hc)
{
	u_long flags;

	spin_lock_irqsave(&hc->lock, flags);
	hc->hw.int_m2 = 0; /* interrupt output off ! */
	disable_hwirq(hc);
	mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
	mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
	if (hc->dch.timer.function != NULL) {
		del_timer(&hc->dch.timer);
		hc->dch.timer.function = NULL;
	}
	spin_unlock_irqrestore(&hc->lock, flags);
	if (hc->hw.protocol == ISDN_P_TE_S0)
		l1_event(hc->dch.l1, CLOSE_CHANNEL);
	if (hc->initdone)
		free_irq(hc->irq, hc);
	release_io_hfcpci(hc); /* must release after free_irq! */
	mISDN_unregister_device(&hc->dch.dev);
	mISDN_freebchannel(&hc->bch[1]);
	mISDN_freebchannel(&hc->bch[0]);
	mISDN_freedchannel(&hc->dch);
	pci_set_drvdata(hc->pdev, NULL);
	kfree(hc);
}

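/*
 * Initialize the mISDN channel structures (one D-channel, two B-channels),
 * program the hardware via setup_hw() and register the device with the
 * mISDN core under the name "hfc-pci.N".  On failure everything allocated
 * here is freed again before the error is returned.
 */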
static int
setup_card(struct hfc_pci *card)
{
	int err = -EINVAL;
	u_int i;
	char name[MISDN_MAX_IDLEN];

	card->dch.debug = debug;
	spin_lock_init(&card->lock);
	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
	card->dch.hw = card;
	card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->dch.dev.D.send = hfcpci_l2l1D;
	card->dch.dev.D.ctrl = hfc_dctrl;
	card->dch.dev.nrbchan = 2;
	for (i = 0; i < 2; i++) {
		card->bch[i].nr = i + 1;
		set_channelmap(i + 1, card->dch.dev.channelmap);
		card->bch[i].debug = debug;
		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
		card->bch[i].hw = card;
		card->bch[i].ch.send = hfcpci_l2l1B;
		card->bch[i].ch.ctrl = hfc_bctrl;
		card->bch[i].ch.nr = i + 1;
		list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
	}
	err = setup_hw(card);
	if (err)
		goto error;
	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
	if (err)
		goto error;
	HFC_cnt++;
	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
	return 0;
error:
	mISDN_freebchannel(&card->bch[1]);
	mISDN_freebchannel(&card->bch[0]);
	mISDN_freedchannel(&card->dch);
	kfree(card);
	return err;
}

/* private data in the PCI devices list */
struct _hfc_map {
	u_int subtype;
	u_int flag;
	char *name;
};

static const struct _hfc_map hfc_map[] =
{
	{HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
	{HFC_CCD_B000, 0, "Billion B000"},
	{HFC_CCD_B006, 0, "Billion B006"},
	{HFC_CCD_B007, 0, "Billion B007"},
	{HFC_CCD_B008, 0, "Billion B008"},
	{HFC_CCD_B009, 0, "Billion B009"},
	{HFC_CCD_B00A, 0, "Billion B00A"},
	{HFC_CCD_B00B, 0, "Billion B00B"},
	{HFC_CCD_B00C, 0, "Billion B00C"},
	{HFC_CCD_B100, 0, "Seyeon B100"},
	{HFC_CCD_B700, 0, "Primux II S0 B700"},
	{HFC_CCD_B701, 0, "Primux II S0 NT B701"},
	{HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
	{HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
	{HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
	{HFC_BERKOM_A1T, 0, "German telekom A1T"},
	{HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
	{HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
	{HFC_DIGI_DF_M_IOM2_E, 0,
	 "Digi International DataFire Micro V IOM2 (Europe)"},
	{HFC_DIGI_DF_M_E, 0,
	 "Digi International DataFire Micro V (Europe)"},
	{HFC_DIGI_DF_M_IOM2_A, 0,
	 "Digi International DataFire Micro V IOM2 (North America)"},
	{HFC_DIGI_DF_M_A, 0,
	 "Digi International DataFire Micro V (North America)"},
	{HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
	{},
};

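/*
 * PCI device table.  Each entry's driver_data points at the matching
 * hfc_map[] element above, so the two tables must stay index-synchronized;
 * hfc_probe() casts driver_data back to a struct _hfc_map to obtain the
 * subtype and the printable card name.
 */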
static const struct pci_device_id hfc_ids[] =
{
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
	  (unsigned long) &hfc_map[0] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
	  (unsigned long) &hfc_map[1] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
	  (unsigned long) &hfc_map[2] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
	  (unsigned long) &hfc_map[3] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
	  (unsigned long) &hfc_map[4] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
	  (unsigned long) &hfc_map[5] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
	  (unsigned long) &hfc_map[6] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
	  (unsigned long) &hfc_map[7] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
	  (unsigned long) &hfc_map[8] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
	  (unsigned long) &hfc_map[9] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
	  (unsigned long) &hfc_map[10] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
	  (unsigned long) &hfc_map[11] },
	{ PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
	  (unsigned long) &hfc_map[12] },
	{ PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
	  (unsigned long) &hfc_map[13] },
	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
	  (unsigned long) &hfc_map[14] },
	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
	  (unsigned long) &hfc_map[15] },
	{ PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
	  (unsigned long) &hfc_map[16] },
	{ PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
	  (unsigned long) &hfc_map[17] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
	  (unsigned long) &hfc_map[18] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
	  (unsigned long) &hfc_map[19] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
	  (unsigned long) &hfc_map[20] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
	  (unsigned long) &hfc_map[21] },
	{ PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
	  (unsigned long) &hfc_map[22] },
	{},
};

static int
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	struct hfc_pci *card;
	struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;

	card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
	if (!card) {
		printk(KERN_ERR "No kmem for HFC card\n");
		return err;
	}
	card->pdev = pdev;
	card->subtype = m->subtype;
	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
	       m->name, pci_name(pdev));

	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_card(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}

static void
hfc_remove_pci(struct pci_dev *pdev)
{
	struct hfc_pci *card = pci_get_drvdata(pdev);

	if (card)
		release_card(card);
	else
		if (debug)
			printk(KERN_DEBUG "%s: drvdata already removed\n",
			       __func__);
}

static struct pci_driver hfc_driver = {
	.name = "hfcpci",
	.probe = hfc_probe,
	.remove = hfc_remove_pci,
	.id_table = hfc_ids,
};

static int
_hfcpci_softirq(struct device *dev, void *unused)
{
	struct hfc_pci *hc = dev_get_drvdata(dev);
	struct bchannel *bch;

	if (hc == NULL)
		return 0;

	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
		spin_lock(&hc->lock);
		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		spin_unlock(&hc->lock);
	}
	return 0;
}

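/*
 * Timer-driven polling, armed only when a non-default poll value forces
 * the driver onto the kernel timer instead of the controller interrupt
 * (tics != 0): walk all registered cards, move transparent B-channel data,
 * and rearm the timer.  The (s32) subtraction below is the usual
 * wrap-around safe way to test whether the next expiry already lies in
 * the past.
 */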
static void
hfcpci_softirq(struct timer_list *unused)
{
	WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
					    _hfcpci_softirq) != 0);

	/* if next event would be in the past ... */
	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
		hfc_jiffies = jiffies + 1;
	else
		hfc_jiffies += tics;
	hfc_tl.expires = hfc_jiffies;
	add_timer(&hfc_tl);
}

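/*
 * Module init: validate the poll parameter and register the PCI driver.
 * A non-default poll value is rounded to the kernel timer granularity:
 * tics = poll * HZ / 8000 (at least 1), then poll = tics * 8000 / HZ.
 * Worked example (assuming HZ == 250, i.e. 32 samples per tick): loading
 * with "modprobe hfcpci poll=100" (module name assumed) gives tics = 3
 * and an effective poll of 96; values that round outside 8..256 are
 * rejected with -EINVAL.  With the default poll value no kernel timer is
 * started at all and the controller's own interrupt is used (tics stays 0).
 */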
static int __init
HFC_init(void)
{
	int err;

	if (!poll)
		poll = HFCPCI_BTRANS_THRESHOLD;

	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		tics = (poll * HZ) / 8000;
		if (tics < 1)
			tics = 1;
		poll = (tics * 8000) / HZ;
		if (poll > 256 || poll < 8) {
			printk(KERN_ERR "%s: Wrong poll value %d not in range "
			       "of 8..256.\n", __func__, poll);
			err = -EINVAL;
			return err;
		}
	}
	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
		       __func__, poll);
		timer_setup(&hfc_tl, hfcpci_softirq, 0);
		hfc_tl.expires = jiffies + tics;
		hfc_jiffies = hfc_tl.expires;
		add_timer(&hfc_tl);
	} else
		tics = 0; /* indicate the use of controller's timer */

	err = pci_register_driver(&hfc_driver);
	if (err) {
		if (timer_pending(&hfc_tl))
			del_timer(&hfc_tl);
	}

	return err;
}

static void __exit
HFC_cleanup(void)
{
	if (timer_pending(&hfc_tl))
		del_timer(&hfc_tl);

	pci_unregister_driver(&hfc_driver);
}

module_init(HFC_init);
module_exit(HFC_cleanup);

MODULE_DEVICE_TABLE(pci, hfc_ids);