1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 *
5 * Author: Martyn Welch <martyn.welch@ge.com>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 */
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29
30 #include "vme.h"
31 #include "vme_bridge.h"
32 #include "vme_tsi148.h"
33
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36
37
38 /* Module parameters */
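/*
 * err_chk enables registration of VME bus error handlers around master
 * window accesses so that bus errors can be reported back to callers.
 * geoid is not used in this section; it is assumed to override the
 * geographical (slot) address used later during CR/CSR initialisation.
 */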
39 static bool err_chk;
40 static int geoid;
41
42 static const char driver_name[] = "vme_tsi148";
43
44 static const struct pci_device_id tsi148_ids[] = {
45 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
46 { },
47 };
48
49 MODULE_DEVICE_TABLE(pci, tsi148_ids);
50
51 static struct pci_driver tsi148_driver = {
52 .name = driver_name,
53 .id_table = tsi148_ids,
54 .probe = tsi148_probe,
55 .remove = tsi148_remove,
56 };
57
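/*
 * Helpers to combine (reg_join) and split (reg_split) 64-bit addresses
 * to and from the upper/lower 32-bit register pairs used by the TSI148.
 */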
58 static void reg_join(unsigned int high, unsigned int low,
59 unsigned long long *variable)
60 {
61 *variable = (unsigned long long)high << 32;
62 *variable |= (unsigned long long)low;
63 }
64
65 static void reg_split(unsigned long long variable, unsigned int *high,
66 unsigned int *low)
67 {
68 *low = (unsigned int)variable & 0xFFFFFFFF;
69 *high = (unsigned int)(variable >> 32);
70 }
71
72 /*
73 * Wakes up DMA queue.
74 */
75 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
76 int channel_mask)
77 {
78 u32 serviced = 0;
79
80 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
81 wake_up(&bridge->dma_queue[0]);
82 serviced |= TSI148_LCSR_INTC_DMA0C;
83 }
84 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
85 wake_up(&bridge->dma_queue[1]);
86 serviced |= TSI148_LCSR_INTC_DMA1C;
87 }
88
89 return serviced;
90 }
91
92 /*
93 * Handle location monitor interrupts by calling the registered callbacks.
94 */
95 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
96 {
97 int i;
98 u32 serviced = 0;
99
100 for (i = 0; i < 4; i++) {
101 if (stat & TSI148_LCSR_INTS_LMS[i]) {
102 /* We only enable interrupts if the callback is set */
103 bridge->lm_callback[i](bridge->lm_data[i]);
104 serviced |= TSI148_LCSR_INTC_LMC[i];
105 }
106 }
107
108 return serviced;
109 }
110
111 /*
112 * Report mail box interrupts and the values received.
113 *
114 * XXX This functionality is not exposed up through the API.
115 */
116 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
117 {
118 int i;
119 u32 val;
120 u32 serviced = 0;
121 struct tsi148_driver *bridge;
122
123 bridge = tsi148_bridge->driver_priv;
124
125 for (i = 0; i < 4; i++) {
126 if (stat & TSI148_LCSR_INTS_MBS[i]) {
127 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
128 dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
129 i, val);
130 serviced |= TSI148_LCSR_INTC_MBC[i];
131 }
132 }
133
134 return serviced;
135 }
136
137 /*
138 * Display error & status message when PERR (PCI) exception interrupt occurs.
139 */
140 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
141 {
142 struct tsi148_driver *bridge;
143
144 bridge = tsi148_bridge->driver_priv;
145
146 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
147 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
148 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
149 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
150
151 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
152 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
153 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
154
155 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
156
157 return TSI148_LCSR_INTC_PERRC;
158 }
159
160 /*
161 * Save address and status when VME error interrupt occurs.
162 */
163 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
164 {
165 unsigned int error_addr_high, error_addr_low;
166 unsigned long long error_addr;
167 u32 error_attrib;
168 int error_am;
169 struct tsi148_driver *bridge;
170
171 bridge = tsi148_bridge->driver_priv;
172
173 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
174 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
175 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
176 error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
177
178 reg_join(error_addr_high, error_addr_low, &error_addr);
179
180 /* Check for exception register overflow (we have lost error data) */
181 if (error_attrib & TSI148_LCSR_VEAT_VEOF)
182 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
183
184 if (err_chk)
185 vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
186 else
187 dev_err(tsi148_bridge->parent,
188 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
189 error_addr, error_attrib);
190
191 /* Clear Status */
192 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
193
194 return TSI148_LCSR_INTC_VERRC;
195 }
196
197 /*
198 * Wake up IACK queue.
199 */
200 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
201 {
202 wake_up(&bridge->iack_queue);
203
204 return TSI148_LCSR_INTC_IACKC;
205 }
206
207 /*
208 * Call the VME bus interrupt callback if provided.
209 */
210 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
211 u32 stat)
212 {
213 int vec, i, serviced = 0;
214 struct tsi148_driver *bridge;
215
216 bridge = tsi148_bridge->driver_priv;
217
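	/* Service pending VME interrupts from the highest level (7) downwards */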
218 for (i = 7; i > 0; i--) {
219 if (stat & (1 << i)) {
220 /*
221 * Note: Even though the registers are defined as
222 * 32-bits in the spec, we only want to issue 8-bit
223 * IACK cycles on the bus, read from offset 3.
224 */
225 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
226
227 vme_irq_handler(tsi148_bridge, i, vec);
228
229 serviced |= (1 << i);
230 }
231 }
232
233 return serviced;
234 }
235
236 /*
237 * Top level interrupt handler. Clears appropriate interrupt status bits and
238 * then calls appropriate sub handler(s).
239 */
240 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
241 {
242 u32 stat, enable, serviced = 0;
243 struct vme_bridge *tsi148_bridge;
244 struct tsi148_driver *bridge;
245
246 tsi148_bridge = ptr;
247
248 bridge = tsi148_bridge->driver_priv;
249
250 /* Determine which interrupts are unmasked and set */
251 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
252 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
253
254 /* Only look at unmasked interrupts */
255 stat &= enable;
256
257 if (unlikely(!stat))
258 return IRQ_NONE;
259
260 /* Call subhandlers as appropriate */
261 /* DMA irqs */
262 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
263 serviced |= tsi148_DMA_irqhandler(bridge, stat);
264
265 /* Location monitor irqs */
266 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
267 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
268 serviced |= tsi148_LM_irqhandler(bridge, stat);
269
270 /* Mail box irqs */
271 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
272 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
273 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
274
275 /* PCI bus error */
276 if (stat & TSI148_LCSR_INTS_PERRS)
277 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
278
279 /* VME bus error */
280 if (stat & TSI148_LCSR_INTS_VERRS)
281 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
282
283 /* IACK irq */
284 if (stat & TSI148_LCSR_INTS_IACKS)
285 serviced |= tsi148_IACK_irqhandler(bridge);
286
287 /* VME bus irqs */
288 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
289 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
290 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
291 TSI148_LCSR_INTS_IRQ1S))
292 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
293
294 /* Clear serviced interrupts */
295 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
296
297 return IRQ_HANDLED;
298 }
299
300 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
301 {
302 int result;
303 unsigned int tmp;
304 struct pci_dev *pdev;
305 struct tsi148_driver *bridge;
306
307 pdev = to_pci_dev(tsi148_bridge->parent);
308
309 bridge = tsi148_bridge->driver_priv;
310
311 result = request_irq(pdev->irq,
312 tsi148_irqhandler,
313 IRQF_SHARED,
314 driver_name, tsi148_bridge);
315 if (result) {
316 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
317 pdev->irq);
318 return result;
319 }
320
321 /* Enable and unmask interrupts */
322 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
323 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
324 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
325 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
326 TSI148_LCSR_INTEO_IACKEO;
327
328 /* This leaves the following interrupts masked.
329 * TSI148_LCSR_INTEO_VIEEO
330 * TSI148_LCSR_INTEO_SYSFLEO
331 * TSI148_LCSR_INTEO_ACFLEO
332 */
333
334 /* Don't enable Location Monitor interrupts here - they will be
335 * enabled when the location monitors are properly configured and
336 * a callback has been attached.
337 * TSI148_LCSR_INTEO_LM0EO
338 * TSI148_LCSR_INTEO_LM1EO
339 * TSI148_LCSR_INTEO_LM2EO
340 * TSI148_LCSR_INTEO_LM3EO
341 */
342
343 /* Don't enable VME interrupts until we add a handler, else the board
344 * will respond to it and we don't want that unless it knows how to
345 * properly deal with it.
346 * TSI148_LCSR_INTEO_IRQ7EO
347 * TSI148_LCSR_INTEO_IRQ6EO
348 * TSI148_LCSR_INTEO_IRQ5EO
349 * TSI148_LCSR_INTEO_IRQ4EO
350 * TSI148_LCSR_INTEO_IRQ3EO
351 * TSI148_LCSR_INTEO_IRQ2EO
352 * TSI148_LCSR_INTEO_IRQ1EO
353 */
354
355 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
356 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
357
358 return 0;
359 }
360
361 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
362 struct pci_dev *pdev)
363 {
364 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
365
366 /* Turn off interrupts */
367 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
368 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
369
370 /* Clear all interrupts */
371 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
372
373 /* Detach interrupt handler */
374 free_irq(pdev->irq, tsi148_bridge);
375 }
376
377 /*
378 * Check to see if an IACK has been received; return true (1) or false (0).
379 */
380 static int tsi148_iack_received(struct tsi148_driver *bridge)
381 {
382 u32 tmp;
383
384 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
385
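	/* IRQS remains set while our interrupt is still awaiting acknowledgement */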
386 if (tmp & TSI148_LCSR_VICR_IRQS)
387 return 0;
388 else
389 return 1;
390 }
391
392 /*
393 * Configure VME interrupt
394 */
395 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
396 int state, int sync)
397 {
398 struct pci_dev *pdev;
399 u32 tmp;
400 struct tsi148_driver *bridge;
401
402 bridge = tsi148_bridge->driver_priv;
403
404 /* We need to do the ordering differently for enabling and disabling */
405 if (state == 0) {
406 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
407 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
408 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
409
410 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
411 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
412 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
413
414 if (sync != 0) {
415 pdev = to_pci_dev(tsi148_bridge->parent);
416 synchronize_irq(pdev->irq);
417 }
418 } else {
419 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
420 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
421 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
422
423 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
424 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
425 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
426 }
427 }
428
429 /*
430 * Generate a VME bus interrupt at the requested level & vector. Wait for
431 * interrupt to be acked.
432 */
433 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
434 int statid)
435 {
436 u32 tmp;
437 struct tsi148_driver *bridge;
438
439 bridge = tsi148_bridge->driver_priv;
440
441 mutex_lock(&bridge->vme_int);
442
443 /* Read VICR register */
444 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
445
446 /* Set Status/ID */
447 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
448 (statid & TSI148_LCSR_VICR_STID_M);
449 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
450
451 /* Assert VMEbus IRQ */
452 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
453 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
454
455 /* XXX Consider implementing a timeout? */
456 wait_event_interruptible(bridge->iack_queue,
457 tsi148_iack_received(bridge));
458
459 mutex_unlock(&bridge->vme_int);
460
461 return 0;
462 }
463
464 /*
465 * Initialize a slave window with the requested attributes.
466 */
467 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
468 unsigned long long vme_base, unsigned long long size,
469 dma_addr_t pci_base, u32 aspace, u32 cycle)
470 {
471 unsigned int i, addr = 0, granularity = 0;
472 unsigned int temp_ctl = 0;
473 unsigned int vme_base_low, vme_base_high;
474 unsigned int vme_bound_low, vme_bound_high;
475 unsigned int pci_offset_low, pci_offset_high;
476 unsigned long long vme_bound, pci_offset;
477 struct vme_bridge *tsi148_bridge;
478 struct tsi148_driver *bridge;
479
480 tsi148_bridge = image->parent;
481 bridge = tsi148_bridge->driver_priv;
482
483 i = image->number;
484
485 switch (aspace) {
486 case VME_A16:
487 granularity = 0x10;
488 addr |= TSI148_LCSR_ITAT_AS_A16;
489 break;
490 case VME_A24:
491 granularity = 0x1000;
492 addr |= TSI148_LCSR_ITAT_AS_A24;
493 break;
494 case VME_A32:
495 granularity = 0x10000;
496 addr |= TSI148_LCSR_ITAT_AS_A32;
497 break;
498 case VME_A64:
499 granularity = 0x10000;
500 addr |= TSI148_LCSR_ITAT_AS_A64;
501 break;
502 default:
503 dev_err(tsi148_bridge->parent, "Invalid address space\n");
504 return -EINVAL;
505 }
506
507 /* Convert 64-bit variables to 2x 32-bit variables */
508 reg_split(vme_base, &vme_base_high, &vme_base_low);
509
510 /*
511 * Bound address is a valid address for the window, adjust
512 * accordingly
513 */
514 vme_bound = vme_base + size - granularity;
515 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
516 pci_offset = (unsigned long long)pci_base - vme_base;
517 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
518
519 if (vme_base_low & (granularity - 1)) {
520 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
521 return -EINVAL;
522 }
523 if (vme_bound_low & (granularity - 1)) {
524 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
525 return -EINVAL;
526 }
527 if (pci_offset_low & (granularity - 1)) {
528 dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
529 return -EINVAL;
530 }
531
532 /* Disable while we are mucking around */
533 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
534 TSI148_LCSR_OFFSET_ITAT);
535 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
536 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
537 TSI148_LCSR_OFFSET_ITAT);
538
539 /* Setup mapping */
540 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
541 TSI148_LCSR_OFFSET_ITSAU);
542 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
543 TSI148_LCSR_OFFSET_ITSAL);
544 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
545 TSI148_LCSR_OFFSET_ITEAU);
546 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
547 TSI148_LCSR_OFFSET_ITEAL);
548 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
549 TSI148_LCSR_OFFSET_ITOFU);
550 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
551 TSI148_LCSR_OFFSET_ITOFL);
552
553 /* Setup 2eSST speeds */
554 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
555 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
556 case VME_2eSST160:
557 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
558 break;
559 case VME_2eSST267:
560 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
561 break;
562 case VME_2eSST320:
563 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
564 break;
565 }
566
567 /* Setup cycle types */
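	/* The 0x1F << 7 mask below is assumed to cover the BLT/MBLT/2eVME/2eSST/2eSSTB enable bits */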
568 temp_ctl &= ~(0x1F << 7);
569 if (cycle & VME_BLT)
570 temp_ctl |= TSI148_LCSR_ITAT_BLT;
571 if (cycle & VME_MBLT)
572 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
573 if (cycle & VME_2eVME)
574 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
575 if (cycle & VME_2eSST)
576 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
577 if (cycle & VME_2eSSTB)
578 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
579
580 /* Setup address space */
581 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
582 temp_ctl |= addr;
583
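	/* Clear the access mode bits (supervisor/user and program/data) before setting new ones */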
584 temp_ctl &= ~0xF;
585 if (cycle & VME_SUPER)
586 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
587 if (cycle & VME_USER)
588 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
589 if (cycle & VME_PROG)
590 temp_ctl |= TSI148_LCSR_ITAT_PGM;
591 if (cycle & VME_DATA)
592 temp_ctl |= TSI148_LCSR_ITAT_DATA;
593
594 /* Write ctl reg without enable */
595 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
596 TSI148_LCSR_OFFSET_ITAT);
597
598 if (enabled)
599 temp_ctl |= TSI148_LCSR_ITAT_EN;
600
601 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
602 TSI148_LCSR_OFFSET_ITAT);
603
604 return 0;
605 }
606
607 /*
608 * Get slave window configuration.
609 */
610 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
611 unsigned long long *vme_base, unsigned long long *size,
612 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
613 {
614 unsigned int i, granularity = 0, ctl = 0;
615 unsigned int vme_base_low, vme_base_high;
616 unsigned int vme_bound_low, vme_bound_high;
617 unsigned int pci_offset_low, pci_offset_high;
618 unsigned long long vme_bound, pci_offset;
619 struct tsi148_driver *bridge;
620
621 bridge = image->parent->driver_priv;
622
623 i = image->number;
624
625 /* Read registers */
626 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
627 TSI148_LCSR_OFFSET_ITAT);
628
629 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
630 TSI148_LCSR_OFFSET_ITSAU);
631 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
632 TSI148_LCSR_OFFSET_ITSAL);
633 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITEAU);
635 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITEAL);
637 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITOFU);
639 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITOFL);
641
642 /* Combine the 2x 32-bit register values into 64-bit variables */
643 reg_join(vme_base_high, vme_base_low, vme_base);
644 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
645 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
646
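	/* The translation offset added to the VME base yields the backing PCI address */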
647 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
648
649 *enabled = 0;
650 *aspace = 0;
651 *cycle = 0;
652
653 if (ctl & TSI148_LCSR_ITAT_EN)
654 *enabled = 1;
655
656 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
657 granularity = 0x10;
658 *aspace |= VME_A16;
659 }
660 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
661 granularity = 0x1000;
662 *aspace |= VME_A24;
663 }
664 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
665 granularity = 0x10000;
666 *aspace |= VME_A32;
667 }
668 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
669 granularity = 0x10000;
670 *aspace |= VME_A64;
671 }
672
673 /* Need granularity before we set the size */
674 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
675
676
677 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
678 *cycle |= VME_2eSST160;
679 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
680 *cycle |= VME_2eSST267;
681 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
682 *cycle |= VME_2eSST320;
683
684 if (ctl & TSI148_LCSR_ITAT_BLT)
685 *cycle |= VME_BLT;
686 if (ctl & TSI148_LCSR_ITAT_MBLT)
687 *cycle |= VME_MBLT;
688 if (ctl & TSI148_LCSR_ITAT_2eVME)
689 *cycle |= VME_2eVME;
690 if (ctl & TSI148_LCSR_ITAT_2eSST)
691 *cycle |= VME_2eSST;
692 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
693 *cycle |= VME_2eSSTB;
694
695 if (ctl & TSI148_LCSR_ITAT_SUPR)
696 *cycle |= VME_SUPER;
697 if (ctl & TSI148_LCSR_ITAT_NPRIV)
698 *cycle |= VME_USER;
699 if (ctl & TSI148_LCSR_ITAT_PGM)
700 *cycle |= VME_PROG;
701 if (ctl & TSI148_LCSR_ITAT_DATA)
702 *cycle |= VME_DATA;
703
704 return 0;
705 }
706
707 /*
708 * Allocate and map PCI Resource
709 */
710 static int tsi148_alloc_resource(struct vme_master_resource *image,
711 unsigned long long size)
712 {
713 unsigned long long existing_size;
714 int retval = 0;
715 struct pci_dev *pdev;
716 struct vme_bridge *tsi148_bridge;
717
718 tsi148_bridge = image->parent;
719
720 pdev = to_pci_dev(tsi148_bridge->parent);
721
722 existing_size = (unsigned long long)(image->bus_resource.end -
723 image->bus_resource.start);
724
725 /* If the existing size is OK, return */
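	/* Note: resource 'end' is inclusive, hence the size - 1 comparison */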
726 if ((size != 0) && (existing_size == (size - 1)))
727 return 0;
728
729 if (existing_size != 0) {
730 iounmap(image->kern_base);
731 image->kern_base = NULL;
732 kfree(image->bus_resource.name);
733 release_resource(&image->bus_resource);
734 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
735 }
736
737 /* Exit here if size is zero */
738 if (size == 0)
739 return 0;
740
741 if (!image->bus_resource.name) {
742 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
743 if (!image->bus_resource.name) {
744 retval = -ENOMEM;
745 goto err_name;
746 }
747 }
748
749 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
750 image->number);
751
752 image->bus_resource.start = 0;
753 image->bus_resource.end = (unsigned long)size;
754 image->bus_resource.flags = IORESOURCE_MEM;
755
756 retval = pci_bus_alloc_resource(pdev->bus,
757 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
758 0, NULL, NULL);
759 if (retval) {
760 dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
761 image->number, (unsigned long)size,
762 (unsigned long)image->bus_resource.start);
763 goto err_resource;
764 }
765
766 image->kern_base = ioremap(
767 image->bus_resource.start, size);
768 if (!image->kern_base) {
769 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
770 retval = -ENOMEM;
771 goto err_remap;
772 }
773
774 return 0;
775
776 err_remap:
777 release_resource(&image->bus_resource);
778 err_resource:
779 kfree(image->bus_resource.name);
780 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
781 err_name:
782 return retval;
783 }
784
785 /*
786 * Free and unmap PCI Resource
787 */
788 static void tsi148_free_resource(struct vme_master_resource *image)
789 {
790 iounmap(image->kern_base);
791 image->kern_base = NULL;
792 release_resource(&image->bus_resource);
793 kfree(image->bus_resource.name);
794 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
795 }
796
797 /*
798 * Set the attributes of an outbound window.
799 */
800 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
801 unsigned long long vme_base, unsigned long long size, u32 aspace,
802 u32 cycle, u32 dwidth)
803 {
804 int retval = 0;
805 unsigned int i;
806 unsigned int temp_ctl = 0;
807 unsigned int pci_base_low, pci_base_high;
808 unsigned int pci_bound_low, pci_bound_high;
809 unsigned int vme_offset_low, vme_offset_high;
810 unsigned long long pci_bound, vme_offset, pci_base;
811 struct vme_bridge *tsi148_bridge;
812 struct tsi148_driver *bridge;
813 struct pci_bus_region region;
814 struct pci_dev *pdev;
815
816 tsi148_bridge = image->parent;
817
818 bridge = tsi148_bridge->driver_priv;
819
820 pdev = to_pci_dev(tsi148_bridge->parent);
821
822 /* Verify input data */
823 if (vme_base & 0xFFFF) {
824 dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
825 retval = -EINVAL;
826 goto err_window;
827 }
828
829 if ((size == 0) && (enabled != 0)) {
830 dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
831 retval = -EINVAL;
832 goto err_window;
833 }
834
835 spin_lock(&image->lock);
836
837 /* Let's allocate the resource here rather than further up the stack as
838 * it avoids pushing loads of bus dependent stuff up the stack. If size
839 * is zero, any existing resource will be freed.
840 */
841 retval = tsi148_alloc_resource(image, size);
842 if (retval) {
843 spin_unlock(&image->lock);
844 dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
845 goto err_res;
846 }
847
848 if (size == 0) {
849 pci_base = 0;
850 pci_bound = 0;
851 vme_offset = 0;
852 } else {
853 pcibios_resource_to_bus(pdev->bus, &region,
854 &image->bus_resource);
855 pci_base = region.start;
856
857 /*
858 * Bound address is a valid address for the window, adjust
859 * according to window granularity.
860 */
861 pci_bound = pci_base + (size - 0x10000);
862 vme_offset = vme_base - pci_base;
863 }
864
865 /* Convert 64-bit variables to 2x 32-bit variables */
866 reg_split(pci_base, &pci_base_high, &pci_base_low);
867 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
868 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
869
870 if (pci_base_low & 0xFFFF) {
871 spin_unlock(&image->lock);
872 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
873 retval = -EINVAL;
874 goto err_gran;
875 }
876 if (pci_bound_low & 0xFFFF) {
877 spin_unlock(&image->lock);
878 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
879 retval = -EINVAL;
880 goto err_gran;
881 }
882 if (vme_offset_low & 0xFFFF) {
883 spin_unlock(&image->lock);
884 dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
885 retval = -EINVAL;
886 goto err_gran;
887 }
888
889 i = image->number;
890
891 /* Disable while we are mucking around */
892 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
893 TSI148_LCSR_OFFSET_OTAT);
894 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
895 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
896 TSI148_LCSR_OFFSET_OTAT);
897
898 /* Setup 2eSST speeds */
899 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
900 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
901 case VME_2eSST160:
902 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
903 break;
904 case VME_2eSST267:
905 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
906 break;
907 case VME_2eSST320:
908 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
909 break;
910 }
911
912 /* Setup cycle types */
913 if (cycle & VME_BLT) {
914 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
915 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
916 }
917 if (cycle & VME_MBLT) {
918 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
919 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
920 }
921 if (cycle & VME_2eVME) {
922 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
923 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
924 }
925 if (cycle & VME_2eSST) {
926 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
927 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
928 }
929 if (cycle & VME_2eSSTB) {
930 dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
931 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
932 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
933 }
934
935 /* Setup data width */
936 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
937 switch (dwidth) {
938 case VME_D16:
939 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
940 break;
941 case VME_D32:
942 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
943 break;
944 default:
945 spin_unlock(&image->lock);
946 dev_err(tsi148_bridge->parent, "Invalid data width\n");
947 retval = -EINVAL;
948 goto err_dwidth;
949 }
950
951 /* Setup address space */
952 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
953 switch (aspace) {
954 case VME_A16:
955 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
956 break;
957 case VME_A24:
958 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
959 break;
960 case VME_A32:
961 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
962 break;
963 case VME_A64:
964 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
965 break;
966 case VME_CRCSR:
967 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
968 break;
969 case VME_USER1:
970 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
971 break;
972 case VME_USER2:
973 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
974 break;
975 case VME_USER3:
976 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
977 break;
978 case VME_USER4:
979 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
980 break;
981 default:
982 spin_unlock(&image->lock);
983 dev_err(tsi148_bridge->parent, "Invalid address space\n");
984 retval = -EINVAL;
985 goto err_aspace;
986 }
987
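	/* Clear the supervisor and program attribute bits before setting new ones */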
988 temp_ctl &= ~(3<<4);
989 if (cycle & VME_SUPER)
990 temp_ctl |= TSI148_LCSR_OTAT_SUP;
991 if (cycle & VME_PROG)
992 temp_ctl |= TSI148_LCSR_OTAT_PGM;
993
994 /* Setup mapping */
995 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
996 TSI148_LCSR_OFFSET_OTSAU);
997 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
998 TSI148_LCSR_OFFSET_OTSAL);
999 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1000 TSI148_LCSR_OFFSET_OTEAU);
1001 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1002 TSI148_LCSR_OFFSET_OTEAL);
1003 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1004 TSI148_LCSR_OFFSET_OTOFU);
1005 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1006 TSI148_LCSR_OFFSET_OTOFL);
1007
1008 /* Write ctl reg without enable */
1009 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1010 TSI148_LCSR_OFFSET_OTAT);
1011
1012 if (enabled)
1013 temp_ctl |= TSI148_LCSR_OTAT_EN;
1014
1015 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1016 TSI148_LCSR_OFFSET_OTAT);
1017
1018 spin_unlock(&image->lock);
1019 return 0;
1020
1021 err_aspace:
1022 err_dwidth:
1023 err_gran:
1024 tsi148_free_resource(image);
1025 err_res:
1026 err_window:
1027 return retval;
1028
1029 }
1030
1031 /*
1032 * Get the attributes of an outbound window.
1033 *
1034 * XXX Not parsing prefetch information.
1035 */
1036 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1037 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1038 u32 *cycle, u32 *dwidth)
1039 {
1040 unsigned int i, ctl;
1041 unsigned int pci_base_low, pci_base_high;
1042 unsigned int pci_bound_low, pci_bound_high;
1043 unsigned int vme_offset_low, vme_offset_high;
1044
1045 unsigned long long pci_base, pci_bound, vme_offset;
1046 struct tsi148_driver *bridge;
1047
1048 bridge = image->parent->driver_priv;
1049
1050 i = image->number;
1051
1052 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1053 TSI148_LCSR_OFFSET_OTAT);
1054
1055 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1056 TSI148_LCSR_OFFSET_OTSAU);
1057 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1058 TSI148_LCSR_OFFSET_OTSAL);
1059 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1060 TSI148_LCSR_OFFSET_OTEAU);
1061 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1062 TSI148_LCSR_OFFSET_OTEAL);
1063 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1064 TSI148_LCSR_OFFSET_OTOFU);
1065 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1066 TSI148_LCSR_OFFSET_OTOFL);
1067
1068 /* Combine the 2x 32-bit register values into 64-bit variables */
1069 reg_join(pci_base_high, pci_base_low, &pci_base);
1070 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1071 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1072
1073 *vme_base = pci_base + vme_offset;
1074 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1075
1076 *enabled = 0;
1077 *aspace = 0;
1078 *cycle = 0;
1079 *dwidth = 0;
1080
1081 if (ctl & TSI148_LCSR_OTAT_EN)
1082 *enabled = 1;
1083
1084 /* Setup address space */
1085 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1086 *aspace |= VME_A16;
1087 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1088 *aspace |= VME_A24;
1089 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1090 *aspace |= VME_A32;
1091 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1092 *aspace |= VME_A64;
1093 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1094 *aspace |= VME_CRCSR;
1095 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1096 *aspace |= VME_USER1;
1097 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1098 *aspace |= VME_USER2;
1099 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1100 *aspace |= VME_USER3;
1101 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1102 *aspace |= VME_USER4;
1103
1104 /* Setup 2eSST speeds */
1105 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1106 *cycle |= VME_2eSST160;
1107 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1108 *cycle |= VME_2eSST267;
1109 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1110 *cycle |= VME_2eSST320;
1111
1112 /* Setup cycle types */
1113 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1114 *cycle |= VME_SCT;
1115 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1116 *cycle |= VME_BLT;
1117 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1118 *cycle |= VME_MBLT;
1119 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1120 *cycle |= VME_2eVME;
1121 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1122 *cycle |= VME_2eSST;
1123 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1124 *cycle |= VME_2eSSTB;
1125
1126 if (ctl & TSI148_LCSR_OTAT_SUP)
1127 *cycle |= VME_SUPER;
1128 else
1129 *cycle |= VME_USER;
1130
1131 if (ctl & TSI148_LCSR_OTAT_PGM)
1132 *cycle |= VME_PROG;
1133 else
1134 *cycle |= VME_DATA;
1135
1136 /* Setup data width */
1137 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1138 *dwidth = VME_D16;
1139 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1140 *dwidth = VME_D32;
1141
1142 return 0;
1143 }
1144
1145
1146 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1147 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1148 u32 *cycle, u32 *dwidth)
1149 {
1150 int retval;
1151
1152 spin_lock(&image->lock);
1153
1154 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1155 cycle, dwidth);
1156
1157 spin_unlock(&image->lock);
1158
1159 return retval;
1160 }
1161
1162 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1163 size_t count, loff_t offset)
1164 {
1165 int retval, enabled;
1166 unsigned long long vme_base, size;
1167 u32 aspace, cycle, dwidth;
1168 struct vme_error_handler *handler = NULL;
1169 struct vme_bridge *tsi148_bridge;
1170 void __iomem *addr = image->kern_base + offset;
1171 unsigned int done = 0;
1172 unsigned int count32;
1173
1174 tsi148_bridge = image->parent;
1175
1176 spin_lock(&image->lock);
1177
1178 if (err_chk) {
1179 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1180 &cycle, &dwidth);
1181 handler = vme_register_error_handler(tsi148_bridge, aspace,
1182 vme_base + offset, count);
1183 if (!handler) {
1184 spin_unlock(&image->lock);
1185 return -ENOMEM;
1186 }
1187 }
1188
1189 /* The following code handles VME address alignment. We cannot use
1190 * memcpy_xxx here because it may cut data transfers into 8-bit
1191 * cycles when D16 or D32 cycles are required on the VME bus.
1192 * On the other hand, the bridge itself ensures that the maximum data
1193 * cycle configured for the transfer is used and splits it
1194 * automatically for non-aligned addresses, so we don't want the
1195 * overhead of needlessly forcing small transfers for the entire cycle.
1196 */
1197 if ((uintptr_t)addr & 0x1) {
1198 *(u8 *)buf = ioread8(addr);
1199 done += 1;
1200 if (done == count)
1201 goto out;
1202 }
1203 if ((uintptr_t)(addr + done) & 0x2) {
1204 if ((count - done) < 2) {
1205 *(u8 *)(buf + done) = ioread8(addr + done);
1206 done += 1;
1207 goto out;
1208 } else {
1209 *(u16 *)(buf + done) = ioread16(addr + done);
1210 done += 2;
1211 }
1212 }
1213
1214 count32 = (count - done) & ~0x3;
1215 while (done < count32) {
1216 *(u32 *)(buf + done) = ioread32(addr + done);
1217 done += 4;
1218 }
1219
1220 if ((count - done) & 0x2) {
1221 *(u16 *)(buf + done) = ioread16(addr + done);
1222 done += 2;
1223 }
1224 if ((count - done) & 0x1) {
1225 *(u8 *)(buf + done) = ioread8(addr + done);
1226 done += 1;
1227 }
1228
1229 out:
1230 retval = count;
1231
1232 if (err_chk) {
1233 if (handler->num_errors) {
1234 dev_err(image->parent->parent,
1235 "First VME read error detected an at address 0x%llx\n",
1236 handler->first_error);
1237 retval = handler->first_error - (vme_base + offset);
1238 }
1239 vme_unregister_error_handler(handler);
1240 }
1241
1242 spin_unlock(&image->lock);
1243
1244 return retval;
1245 }
1246
1247
1248 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1249 size_t count, loff_t offset)
1250 {
1251 int retval = 0, enabled;
1252 unsigned long long vme_base, size;
1253 u32 aspace, cycle, dwidth;
1254 void __iomem *addr = image->kern_base + offset;
1255 unsigned int done = 0;
1256 unsigned int count32;
1257
1258 struct vme_error_handler *handler = NULL;
1259 struct vme_bridge *tsi148_bridge;
1260 struct tsi148_driver *bridge;
1261
1262 tsi148_bridge = image->parent;
1263
1264 bridge = tsi148_bridge->driver_priv;
1265
1266 spin_lock(&image->lock);
1267
1268 if (err_chk) {
1269 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1270 &cycle, &dwidth);
1271 handler = vme_register_error_handler(tsi148_bridge, aspace,
1272 vme_base + offset, count);
1273 if (!handler) {
1274 spin_unlock(&image->lock);
1275 return -ENOMEM;
1276 }
1277 }
1278
1279 /* Here we apply the same strategy as in the master_read
1280 * function in order to ensure the correct cycles.
1281 */
1282 if ((uintptr_t)addr & 0x1) {
1283 iowrite8(*(u8 *)buf, addr);
1284 done += 1;
1285 if (done == count)
1286 goto out;
1287 }
1288 if ((uintptr_t)(addr + done) & 0x2) {
1289 if ((count - done) < 2) {
1290 iowrite8(*(u8 *)(buf + done), addr + done);
1291 done += 1;
1292 goto out;
1293 } else {
1294 iowrite16(*(u16 *)(buf + done), addr + done);
1295 done += 2;
1296 }
1297 }
1298
1299 count32 = (count - done) & ~0x3;
1300 while (done < count32) {
1301 iowrite32(*(u32 *)(buf + done), addr + done);
1302 done += 4;
1303 }
1304
1305 if ((count - done) & 0x2) {
1306 iowrite16(*(u16 *)(buf + done), addr + done);
1307 done += 2;
1308 }
1309 if ((count - done) & 0x1) {
1310 iowrite8(*(u8 *)(buf + done), addr + done);
1311 done += 1;
1312 }
1313
1314 out:
1315 retval = count;
1316
1317 /*
1318 * Writes are posted. We need to do a read on the VME bus to flush out
1319 * all of the writes before we check for errors. We can't guarantee
1320 * that reading the data we have just written is safe. It is believed
1321 * that there isn't any read, write re-ordering, so we can read any
1322 * location in VME space, so let's read the Device ID from the tsi148's
1323 * own registers as mapped into CR/CSR space.
1324 *
1325 * We check for saved errors in the written address range/space.
1326 */
1327
1328 if (err_chk) {
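		/* The read offset 0x7F000 is assumed to be the TSI148 register group within its CR/CSR space */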
1329 ioread16(bridge->flush_image->kern_base + 0x7F000);
1330
1331 if (handler->num_errors) {
1332 dev_warn(tsi148_bridge->parent,
1333 "First VME write error detected an at address 0x%llx\n",
1334 handler->first_error);
1335 retval = handler->first_error - (vme_base + offset);
1336 }
1337 vme_unregister_error_handler(handler);
1338 }
1339
1340 spin_unlock(&image->lock);
1341
1342 return retval;
1343 }
1344
1345 /*
1346 * Perform an RMW cycle on the VME bus.
1347 *
1348 * Requires a previously configured master window, returns final value.
1349 */
1350 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1351 unsigned int mask, unsigned int compare, unsigned int swap,
1352 loff_t offset)
1353 {
1354 unsigned long long pci_addr;
1355 unsigned int pci_addr_high, pci_addr_low;
1356 u32 tmp, result;
1357 int i;
1358 struct tsi148_driver *bridge;
1359
1360 bridge = image->parent->driver_priv;
1361
1362 /* Find the PCI address that maps to the desired VME address */
1363 i = image->number;
1364
1365 /* Locking as we can only do one of these at a time */
1366 mutex_lock(&bridge->vme_rmw);
1367
1368 /* Lock image */
1369 spin_lock(&image->lock);
1370
1371 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1372 TSI148_LCSR_OFFSET_OTSAU);
1373 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1374 TSI148_LCSR_OFFSET_OTSAL);
1375
1376 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1377 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1378
1379 /* Configure registers */
1380 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1381 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1382 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1383 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1384 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1385
1386 /* Enable RMW */
1387 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1388 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1389 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1390
1391 /* Kick process off with a read to the required address. */
1392 result = ioread32be(image->kern_base + offset);
1393
1394 /* Disable RMW */
1395 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1396 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1397 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1398
1399 spin_unlock(&image->lock);
1400
1401 mutex_unlock(&bridge->vme_rmw);
1402
1403 return result;
1404 }
1405
1406 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1407 u32 aspace, u32 cycle, u32 dwidth)
1408 {
1409 u32 val;
1410
1411 val = be32_to_cpu(*attr);
1412
1413 /* Setup 2eSST speeds */
1414 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1415 case VME_2eSST160:
1416 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1417 break;
1418 case VME_2eSST267:
1419 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1420 break;
1421 case VME_2eSST320:
1422 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1423 break;
1424 }
1425
1426 /* Setup cycle types */
1427 if (cycle & VME_SCT)
1428 val |= TSI148_LCSR_DSAT_TM_SCT;
1429
1430 if (cycle & VME_BLT)
1431 val |= TSI148_LCSR_DSAT_TM_BLT;
1432
1433 if (cycle & VME_MBLT)
1434 val |= TSI148_LCSR_DSAT_TM_MBLT;
1435
1436 if (cycle & VME_2eVME)
1437 val |= TSI148_LCSR_DSAT_TM_2eVME;
1438
1439 if (cycle & VME_2eSST)
1440 val |= TSI148_LCSR_DSAT_TM_2eSST;
1441
1442 if (cycle & VME_2eSSTB) {
1443 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1444 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1445 }
1446
1447 /* Setup data width */
1448 switch (dwidth) {
1449 case VME_D16:
1450 val |= TSI148_LCSR_DSAT_DBW_16;
1451 break;
1452 case VME_D32:
1453 val |= TSI148_LCSR_DSAT_DBW_32;
1454 break;
1455 default:
1456 dev_err(dev, "Invalid data width\n");
1457 return -EINVAL;
1458 }
1459
1460 /* Setup address space */
1461 switch (aspace) {
1462 case VME_A16:
1463 val |= TSI148_LCSR_DSAT_AMODE_A16;
1464 break;
1465 case VME_A24:
1466 val |= TSI148_LCSR_DSAT_AMODE_A24;
1467 break;
1468 case VME_A32:
1469 val |= TSI148_LCSR_DSAT_AMODE_A32;
1470 break;
1471 case VME_A64:
1472 val |= TSI148_LCSR_DSAT_AMODE_A64;
1473 break;
1474 case VME_CRCSR:
1475 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1476 break;
1477 case VME_USER1:
1478 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1479 break;
1480 case VME_USER2:
1481 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1482 break;
1483 case VME_USER3:
1484 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1485 break;
1486 case VME_USER4:
1487 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1488 break;
1489 default:
1490 dev_err(dev, "Invalid address space\n");
1491 return -EINVAL;
1492 }
1493
1494 if (cycle & VME_SUPER)
1495 val |= TSI148_LCSR_DSAT_SUP;
1496 if (cycle & VME_PROG)
1497 val |= TSI148_LCSR_DSAT_PGM;
1498
1499 *attr = cpu_to_be32(val);
1500
1501 return 0;
1502 }
1503
1504 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1505 u32 aspace, u32 cycle, u32 dwidth)
1506 {
1507 u32 val;
1508
1509 val = be32_to_cpu(*attr);
1510
1511 /* Setup 2eSST speeds */
1512 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1513 case VME_2eSST160:
1514 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1515 break;
1516 case VME_2eSST267:
1517 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1518 break;
1519 case VME_2eSST320:
1520 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1521 break;
1522 }
1523
1524 /* Setup cycle types */
1525 if (cycle & VME_SCT)
1526 val |= TSI148_LCSR_DDAT_TM_SCT;
1527
1528 if (cycle & VME_BLT)
1529 val |= TSI148_LCSR_DDAT_TM_BLT;
1530
1531 if (cycle & VME_MBLT)
1532 val |= TSI148_LCSR_DDAT_TM_MBLT;
1533
1534 if (cycle & VME_2eVME)
1535 val |= TSI148_LCSR_DDAT_TM_2eVME;
1536
1537 if (cycle & VME_2eSST)
1538 val |= TSI148_LCSR_DDAT_TM_2eSST;
1539
1540 if (cycle & VME_2eSSTB) {
1541 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1542 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1543 }
1544
1545 /* Setup data width */
1546 switch (dwidth) {
1547 case VME_D16:
1548 val |= TSI148_LCSR_DDAT_DBW_16;
1549 break;
1550 case VME_D32:
1551 val |= TSI148_LCSR_DDAT_DBW_32;
1552 break;
1553 default:
1554 dev_err(dev, "Invalid data width\n");
1555 return -EINVAL;
1556 }
1557
1558 /* Setup address space */
1559 switch (aspace) {
1560 case VME_A16:
1561 val |= TSI148_LCSR_DDAT_AMODE_A16;
1562 break;
1563 case VME_A24:
1564 val |= TSI148_LCSR_DDAT_AMODE_A24;
1565 break;
1566 case VME_A32:
1567 val |= TSI148_LCSR_DDAT_AMODE_A32;
1568 break;
1569 case VME_A64:
1570 val |= TSI148_LCSR_DDAT_AMODE_A64;
1571 break;
1572 case VME_CRCSR:
1573 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1574 break;
1575 case VME_USER1:
1576 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1577 break;
1578 case VME_USER2:
1579 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1580 break;
1581 case VME_USER3:
1582 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1583 break;
1584 case VME_USER4:
1585 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1586 break;
1587 default:
1588 dev_err(dev, "Invalid address space\n");
1589 return -EINVAL;
1590 }
1591
1592 if (cycle & VME_SUPER)
1593 val |= TSI148_LCSR_DDAT_SUP;
1594 if (cycle & VME_PROG)
1595 val |= TSI148_LCSR_DDAT_PGM;
1596
1597 *attr = cpu_to_be32(val);
1598
1599 return 0;
1600 }
1601
1602 /*
1603 * Add a linked-list descriptor to the list
1604 *
1605 * Note: DMA engine expects the DMA descriptor to be big endian.
1606 */
1607 static int tsi148_dma_list_add(struct vme_dma_list *list,
1608 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1609 {
1610 struct tsi148_dma_entry *entry, *prev;
1611 u32 address_high, address_low, val;
1612 struct vme_dma_pattern *pattern_attr;
1613 struct vme_dma_pci *pci_attr;
1614 struct vme_dma_vme *vme_attr;
1615 int retval = 0;
1616 struct vme_bridge *tsi148_bridge;
1617
1618 tsi148_bridge = list->parent->parent;
1619
1620 /* Descriptor must be aligned on 64-bit boundaries */
1621 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1622 if (!entry) {
1623 retval = -ENOMEM;
1624 goto err_mem;
1625 }
1626
1627 /* Test descriptor alignment */
1628 if ((unsigned long)&entry->descriptor & 0x7) {
1629 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
1630 &entry->descriptor);
1631 retval = -EINVAL;
1632 goto err_align;
1633 }
1634
1635 /* Given we are going to fill out the structure, we probably don't
1636 * need to zero it, but better safe than sorry for now.
1637 */
1638 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1639
1640 /* Fill out source part */
1641 switch (src->type) {
1642 case VME_DMA_PATTERN:
1643 pattern_attr = src->private;
1644
1645 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1646
1647 val = TSI148_LCSR_DSAT_TYP_PAT;
1648
1649 /* Default behaviour is 32 bit pattern */
1650 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1651 val |= TSI148_LCSR_DSAT_PSZ;
1652
1653 /* It seems that the default behaviour is to increment */
1654 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1655 val |= TSI148_LCSR_DSAT_NIN;
1656 entry->descriptor.dsat = cpu_to_be32(val);
1657 break;
1658 case VME_DMA_PCI:
1659 pci_attr = src->private;
1660
1661 reg_split((unsigned long long)pci_attr->address, &address_high,
1662 &address_low);
1663 entry->descriptor.dsau = cpu_to_be32(address_high);
1664 entry->descriptor.dsal = cpu_to_be32(address_low);
1665 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1666 break;
1667 case VME_DMA_VME:
1668 vme_attr = src->private;
1669
1670 reg_split((unsigned long long)vme_attr->address, &address_high,
1671 &address_low);
1672 entry->descriptor.dsau = cpu_to_be32(address_high);
1673 entry->descriptor.dsal = cpu_to_be32(address_low);
1674 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1675
1676 retval = tsi148_dma_set_vme_src_attributes(
1677 tsi148_bridge->parent, &entry->descriptor.dsat,
1678 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1679 if (retval < 0)
1680 goto err_source;
1681 break;
1682 default:
1683 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1684 retval = -EINVAL;
1685 goto err_source;
1686 }
1687
1688 /* Assume last link - this will be over-written by adding another */
1689 entry->descriptor.dnlau = cpu_to_be32(0);
1690 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1691
1692 /* Fill out destination part */
1693 switch (dest->type) {
1694 case VME_DMA_PCI:
1695 pci_attr = dest->private;
1696
1697 reg_split((unsigned long long)pci_attr->address, &address_high,
1698 &address_low);
1699 entry->descriptor.ddau = cpu_to_be32(address_high);
1700 entry->descriptor.ddal = cpu_to_be32(address_low);
1701 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1702 break;
1703 case VME_DMA_VME:
1704 vme_attr = dest->private;
1705
1706 reg_split((unsigned long long)vme_attr->address, &address_high,
1707 &address_low);
1708 entry->descriptor.ddau = cpu_to_be32(address_high);
1709 entry->descriptor.ddal = cpu_to_be32(address_low);
1710 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1711
1712 retval = tsi148_dma_set_vme_dest_attributes(
1713 tsi148_bridge->parent, &entry->descriptor.ddat,
1714 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1715 if (retval < 0)
1716 goto err_dest;
1717 break;
1718 default:
1719 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1720 retval = -EINVAL;
1721 goto err_dest;
1722 }
1723
1724 /* Fill out count */
1725 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1726
1727 /* Add to list */
1728 list_add_tail(&entry->list, &list->entries);
1729
1730 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1731 &entry->descriptor,
1732 sizeof(entry->descriptor),
1733 DMA_TO_DEVICE);
1734 if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1735 dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1736 retval = -EINVAL;
1737 goto err_dma;
1738 }
1739
1740 /* Fill out previous descriptors "Next Address" */
1741 if (entry->list.prev != &list->entries) {
1742 reg_split((unsigned long long)entry->dma_handle, &address_high,
1743 &address_low);
1744 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1745 list);
1746 prev->descriptor.dnlau = cpu_to_be32(address_high);
1747 prev->descriptor.dnlal = cpu_to_be32(address_low);
1748
1749 }
1750
1751 return 0;
1752
1753 err_dma:
1754 err_dest:
1755 err_source:
1756 err_align:
1757 kfree(entry);
1758 err_mem:
1759 return retval;
1760 }
1761
1762 /*
1763 * Check whether the DMA channel has finished: returns 1 when idle, 0 while busy.
1764 */
1765 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1766 {
1767 u32 tmp;
1768 struct tsi148_driver *bridge;
1769
1770 bridge = tsi148_bridge->driver_priv;
1771
1772 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1773 TSI148_LCSR_OFFSET_DSTA);
1774
1775 if (tmp & TSI148_LCSR_DSTA_BSY)
1776 return 0;
1777 else
1778 return 1;
1779
1780 }
1781
1782 /*
1783 * Execute a previously generated linked list
1784 *
1785 * XXX Need to provide control register configuration.
1786 */
1787 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1788 {
1789 struct vme_dma_resource *ctrlr;
1790 int channel, retval;
1791 struct tsi148_dma_entry *entry;
1792 u32 bus_addr_high, bus_addr_low;
1793 u32 val, dctlreg = 0;
1794 struct vme_bridge *tsi148_bridge;
1795 struct tsi148_driver *bridge;
1796
1797 ctrlr = list->parent;
1798
1799 tsi148_bridge = ctrlr->parent;
1800
1801 bridge = tsi148_bridge->driver_priv;
1802
1803 mutex_lock(&ctrlr->mtx);
1804
1805 channel = ctrlr->number;
1806
1807 if (!list_empty(&ctrlr->running)) {
1808 /*
1809 * XXX We have an active DMA transfer and currently haven't
1810 * sorted out the mechanism for "pending" DMA transfers.
1811 * Return busy.
1812 */
1813 /* Need to add to pending here */
1814 mutex_unlock(&ctrlr->mtx);
1815 return -EBUSY;
1816 }
1817
1818 list_add(&list->list, &ctrlr->running);
1819
1820 /* Get first bus address and write into registers */
1821 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1822 list);
1823
1824 mutex_unlock(&ctrlr->mtx);
1825
1826 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1827
1828 iowrite32be(bus_addr_high, bridge->base +
1829 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1830 iowrite32be(bus_addr_low, bridge->base +
1831 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1832
1833 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1834 TSI148_LCSR_OFFSET_DCTL);
1835
1836 /* Start the operation */
1837 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1838 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1839
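	/* Sleep until the DMA channel reports idle (or we are interrupted by a signal) */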
1840 retval = wait_event_interruptible(bridge->dma_queue[channel],
1841 tsi148_dma_busy(ctrlr->parent, channel));
1842
1843 if (retval) {
1844 iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1845 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1846 /* Wait for the operation to abort */
1847 wait_event(bridge->dma_queue[channel],
1848 tsi148_dma_busy(ctrlr->parent, channel));
1849 retval = -EINTR;
1850 goto exit;
1851 }
1852
1853 /*
1854 * Read the status register; its contents remain valid until we kick off a
1855 * new transfer.
1856 */
1857 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1858 TSI148_LCSR_OFFSET_DSTA);
1859
1860 if (val & TSI148_LCSR_DSTA_VBE) {
1861 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1862 retval = -EIO;
1863 }
1864
1865 exit:
1866 /* Remove list from running list */
1867 mutex_lock(&ctrlr->mtx);
1868 list_del(&list->list);
1869 mutex_unlock(&ctrlr->mtx);
1870
1871 return retval;
1872 }
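
/*
 * Rough usage sketch (illustrative, not part of this driver): a VME client
 * driver would normally reach the handlers above through the DMA list API
 * exported by the VME core rather than calling them directly, roughly:
 *
 *	list = vme_new_dma_list(resource);
 *	src = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);
 *	dest = vme_dma_pci_attribute(pci_addr);
 *	vme_dma_list_add(list, src, dest, count);
 *	vme_dma_list_exec(list);	(ends up in tsi148_dma_list_exec)
 *	vme_dma_list_free(list);
 *
 * The helper names, attribute choices and error handling above are
 * assumptions about the core API, shown only as a sketch.
 */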
1873
1874 /*
1875 * Clean up a previously generated link list
1876 *
1877 * This is kept separate from execution; do not assume that the chain cannot be reused.
1878 */
1879 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1880 {
1881 struct list_head *pos, *temp;
1882 struct tsi148_dma_entry *entry;
1883
1884 struct vme_bridge *tsi148_bridge = list->parent->parent;
1885
1886 /* detach and free each entry */
1887 list_for_each_safe(pos, temp, &list->entries) {
1888 list_del(pos);
1889 entry = list_entry(pos, struct tsi148_dma_entry, list);
1890
1891 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1892 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1893 kfree(entry);
1894 }
1895
1896 return 0;
1897 }
1898
1899 /*
1900 * All 4 location monitors reside at the same base - this is therefore a
1901 * system-wide configuration.
1902 *
1903 * This does not enable the location monitor - that should be done when the
1904 * first callback is attached and disabled when the last callback is removed.
1905 */
1906 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1907 u32 aspace, u32 cycle)
1908 {
1909 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1910 int i;
1911 struct vme_bridge *tsi148_bridge;
1912 struct tsi148_driver *bridge;
1913
1914 tsi148_bridge = lm->parent;
1915
1916 bridge = tsi148_bridge->driver_priv;
1917
1918 mutex_lock(&lm->mtx);
1919
1920 /* If we already have a callback attached, we can't move it! */
1921 for (i = 0; i < lm->monitors; i++) {
1922 if (bridge->lm_callback[i]) {
1923 mutex_unlock(&lm->mtx);
1924 dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
1925 return -EBUSY;
1926 }
1927 }
1928
1929 switch (aspace) {
1930 case VME_A16:
1931 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1932 break;
1933 case VME_A24:
1934 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1935 break;
1936 case VME_A32:
1937 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1938 break;
1939 case VME_A64:
1940 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1941 break;
1942 default:
1943 mutex_unlock(&lm->mtx);
1944 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1945 return -EINVAL;
1946 }
1947
1948 if (cycle & VME_SUPER)
1949 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1950 if (cycle & VME_USER)
1951 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1952 if (cycle & VME_PROG)
1953 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1954 if (cycle & VME_DATA)
1955 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1956
1957 reg_split(lm_base, &lm_base_high, &lm_base_low);
1958
1959 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1960 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1961 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1962
1963 mutex_unlock(&lm->mtx);
1964
1965 return 0;
1966 }
1967
1968 /* Get the current location monitor configuration and return whether it is
1969 * enabled or disabled.
1970 */
1971 static int tsi148_lm_get(struct vme_lm_resource *lm,
1972 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1973 {
1974 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1975 struct tsi148_driver *bridge;
1976
1977 bridge = lm->parent->driver_priv;
1978
1979 mutex_lock(&lm->mtx);
1980
1981 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1982 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1983 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1984
1985 reg_join(lm_base_high, lm_base_low, lm_base);
1986
1987 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1988 enabled = 1;
1989
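/*
 * Note: the address space and cycle attributes below are OR'd into the
 * caller's *aspace and *cycle, so callers should normally pass in
 * zero-initialised values.
 */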
1990 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1991 *aspace |= VME_A16;
1992
1993 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1994 *aspace |= VME_A24;
1995
1996 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1997 *aspace |= VME_A32;
1998
1999 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2000 *aspace |= VME_A64;
2001
2002
2003 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2004 *cycle |= VME_SUPER;
2005 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2006 *cycle |= VME_USER;
2007 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2008 *cycle |= VME_PROG;
2009 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2010 *cycle |= VME_DATA;
2011
2012 mutex_unlock(&lm->mtx);
2013
2014 return enabled;
2015 }
2016
2017 /*
2018 * Attach a callback to a specific location monitor.
2019 *
2020 * The callback is passed the data pointer registered for the monitor that triggered.
2021 */
2022 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2023 void (*callback)(void *), void *data)
2024 {
2025 u32 lm_ctl, tmp;
2026 struct vme_bridge *tsi148_bridge;
2027 struct tsi148_driver *bridge;
2028
2029 tsi148_bridge = lm->parent;
2030
2031 bridge = tsi148_bridge->driver_priv;
2032
2033 mutex_lock(&lm->mtx);
2034
2035 /* Ensure that the location monitor is configured - need PGM or DATA */
2036 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2037 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2038 mutex_unlock(&lm->mtx);
2039 dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
2040 return -EINVAL;
2041 }
2042
2043 /* Check that a callback isn't already attached */
2044 if (bridge->lm_callback[monitor]) {
2045 mutex_unlock(&lm->mtx);
2046 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2047 return -EBUSY;
2048 }
2049
2050 /* Attach callback */
2051 bridge->lm_callback[monitor] = callback;
2052 bridge->lm_data[monitor] = data;
2053
2054 /* Enable Location Monitor interrupt */
2055 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2056 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2057 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2058
2059 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2060 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2061 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2062
2063 /* Ensure that the global Location Monitor Enable bit is set */
2064 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2065 lm_ctl |= TSI148_LCSR_LMAT_EN;
2066 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2067 }
2068
2069 mutex_unlock(&lm->mtx);
2070
2071 return 0;
2072 }
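
/*
 * Rough usage sketch (illustrative only): through the VME core a client
 * would typically configure the shared base with vme_lm_set() and then
 * attach per-monitor callbacks with vme_lm_attach(), which lands here.
 * Assuming those core helpers, that might look roughly like:
 *
 *	vme_lm_set(lm_resource, 0x1000, VME_A16, VME_USER | VME_DATA);
 *	vme_lm_attach(lm_resource, 0, my_callback, my_data);
 *
 * The base address, monitor number, callback and data names are
 * placeholders.
 */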
2073
2074 /*
2075 * Detach a callback function from a specific location monitor.
2076 */
2077 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2078 {
2079 u32 lm_en, tmp;
2080 struct tsi148_driver *bridge;
2081
2082 bridge = lm->parent->driver_priv;
2083
2084 mutex_lock(&lm->mtx);
2085
2086 /* Disable Location Monitor and ensure previous interrupts are clear */
2087 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2088 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2089 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2090
2091 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2092 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2093 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2094
2095 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2096 bridge->base + TSI148_LCSR_INTC);
2097
2098 /* Detach callback */
2099 bridge->lm_callback[monitor] = NULL;
2100 bridge->lm_data[monitor] = NULL;
2101
2102 /* If all location monitors are disabled, disable the global Location Monitor */
2103 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2104 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2105 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2106 tmp &= ~TSI148_LCSR_LMAT_EN;
2107 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2108 }
2109
2110 mutex_unlock(&lm->mtx);
2111
2112 return 0;
2113 }
2114
2115 /*
2116 * Determine Geographical Addressing
2117 */
2118 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2119 {
2120 u32 slot = 0;
2121 struct tsi148_driver *bridge;
2122
2123 bridge = tsi148_bridge->driver_priv;
2124
2125 if (!geoid) {
2126 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2127 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2128 } else {
2129 slot = geoid;
}
2130
2131 return (int)slot;
2132 }
2133
2134 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2135 dma_addr_t *dma)
2136 {
2137 struct pci_dev *pdev;
2138
2139 /* Find pci_dev container of dev */
2140 pdev = to_pci_dev(parent);
2141
2142 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
2143 }
2144
2145 static void tsi148_free_consistent(struct device *parent, size_t size,
2146 void *vaddr, dma_addr_t dma)
2147 {
2148 struct pci_dev *pdev;
2149
2150 /* Find pci_dev container of dev */
2151 pdev = to_pci_dev(parent);
2152
2153 dma_free_coherent(&pdev->dev, size, vaddr, dma);
2154 }
2155
2156 /*
2157 * Configure CR/CSR space
2158 *
2159 * Access to the CR/CSR space can be configured at power-up. The location of the
2160 * CR/CSR registers in the CR/CSR address space is determined by the board's
2161 * Auto-ID or geographical address. This function ensures that the window is
2162 * enabled at an offset consistent with the board's geographical address.
2163 *
2164 * Each board has a 512kB window, with the highest 4kB being used for the
2165 * board's registers; this leaves a fixed-length 508kB window which must
2166 * be mapped onto PCI memory.
2167 */
2168 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2169 struct pci_dev *pdev)
2170 {
2171 u32 cbar, crat, vstat;
2172 u32 crcsr_bus_high, crcsr_bus_low;
2173 int retval;
2174 struct tsi148_driver *bridge;
2175
2176 bridge = tsi148_bridge->driver_priv;
2177
2178 /* Allocate mem for CR/CSR image */
2179 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
2180 VME_CRCSR_BUF_SIZE,
2181 &bridge->crcsr_bus, GFP_KERNEL);
2182 if (!bridge->crcsr_kernel) {
2183 dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
2184 return -ENOMEM;
2185 }
2186
2187 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2188
2189 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2190 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2191
2192 /* Ensure that the CR/CSR is configured at the correct offset */
2193 cbar = ioread32be(bridge->base + TSI148_CBAR);
2194 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2195
2196 vstat = tsi148_slot_get(tsi148_bridge);
2197
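/*
 * CBAR holds the slot number (shifted left by 3 bits) and each slot's
 * CR/CSR image occupies a 512kB (0x80000) chunk of CR/CSR space, so a
 * board in, say, slot 3 would be served from offset 3 * 0x80000 =
 * 0x180000 (illustrative value). The configured offset must therefore
 * match the slot we report.
 */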
2198 if (cbar != vstat) {
2199 cbar = vstat;
2200 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2201 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2202 }
2203 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2204
2205 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2206 if (crat & TSI148_LCSR_CRAT_EN) {
2207 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2208 } else {
2209 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2210 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2211 bridge->base + TSI148_LCSR_CRAT);
2212 }
2213
2214 /* If we want flushed, error-checked writes, set up a window
2215 * over the CR/CSR registers. We read from here to safely flush
2216 * through VME writes.
2217 */
2218 if (err_chk) {
2219 retval = tsi148_master_set(bridge->flush_image, 1,
2220 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2221 VME_D16);
2222 if (retval)
2223 dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
2224 }
2225
2226 return 0;
2227
2228 }
2229
2230 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2231 struct pci_dev *pdev)
2232 {
2233 u32 crat;
2234 struct tsi148_driver *bridge;
2235
2236 bridge = tsi148_bridge->driver_priv;
2237
2238 /* Turn off CR/CSR space */
2239 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2240 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2241 bridge->base + TSI148_LCSR_CRAT);
2242
2243 /* Free image */
2244 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2245 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2246
2247 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
2248 bridge->crcsr_kernel, bridge->crcsr_bus);
2249 }
2250
2251 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2252 {
2253 int retval, i, master_num;
2254 u32 data;
2255 struct list_head *pos = NULL, *n;
2256 struct vme_bridge *tsi148_bridge;
2257 struct tsi148_driver *tsi148_device;
2258 struct vme_master_resource *master_image;
2259 struct vme_slave_resource *slave_image;
2260 struct vme_dma_resource *dma_ctrlr;
2261 struct vme_lm_resource *lm;
2262
2263 /* If we want to support more than one of each bridge, we need to
2264 * dynamically generate this so we get one per device
2265 */
2266 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2267 if (!tsi148_bridge) {
2268 retval = -ENOMEM;
2269 goto err_struct;
2270 }
2271 vme_init_bridge(tsi148_bridge);
2272
2273 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2274 if (!tsi148_device) {
2275 retval = -ENOMEM;
2276 goto err_driver;
2277 }
2278
2279 tsi148_bridge->driver_priv = tsi148_device;
2280
2281 /* Enable the device */
2282 retval = pci_enable_device(pdev);
2283 if (retval) {
2284 dev_err(&pdev->dev, "Unable to enable device\n");
2285 goto err_enable;
2286 }
2287
2288 /* Map Registers */
2289 retval = pci_request_regions(pdev, driver_name);
2290 if (retval) {
2291 dev_err(&pdev->dev, "Unable to reserve resources\n");
2292 goto err_resource;
2293 }
2294
2295 /* map registers in BAR 0 */
2296 tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2297 4096);
2298 if (!tsi148_device->base) {
2299 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2300 retval = -EIO;
2301 goto err_remap;
2302 }
2303
2304 /* Check to see if the mapping worked out */
2305 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2306 if (data != PCI_VENDOR_ID_TUNDRA) {
2307 dev_err(&pdev->dev, "CRG region check failed\n");
2308 retval = -EIO;
2309 goto err_test;
2310 }
2311
2312 /* Initialize wait queues & mutual exclusion flags */
2313 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2314 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2315 init_waitqueue_head(&tsi148_device->iack_queue);
2316 mutex_init(&tsi148_device->vme_int);
2317 mutex_init(&tsi148_device->vme_rmw);
2318
2319 tsi148_bridge->parent = &pdev->dev;
2320 strcpy(tsi148_bridge->name, driver_name);
2321
2322 /* Setup IRQ */
2323 retval = tsi148_irq_init(tsi148_bridge);
2324 if (retval != 0) {
2325 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2326 goto err_irq;
2327 }
2328
2329 /* If we are going to flush writes, we need to read from the VME bus.
2330 * We need to do this safely, thus we read the device's own CR/CSR
2331 * register. To do this we must set up a window in CR/CSR space and
2332 * hence have one less master window resource available.
2333 */
2334 master_num = TSI148_MAX_MASTER;
2335 if (err_chk) {
2336 master_num--;
2337
2338 tsi148_device->flush_image =
2339 kmalloc(sizeof(*tsi148_device->flush_image),
2340 GFP_KERNEL);
2341 if (!tsi148_device->flush_image) {
2342 retval = -ENOMEM;
2343 goto err_master;
2344 }
2345 tsi148_device->flush_image->parent = tsi148_bridge;
2346 spin_lock_init(&tsi148_device->flush_image->lock);
2347 tsi148_device->flush_image->locked = 1;
2348 tsi148_device->flush_image->number = master_num;
2349 memset(&tsi148_device->flush_image->bus_resource, 0,
2350 sizeof(tsi148_device->flush_image->bus_resource));
2351 tsi148_device->flush_image->kern_base = NULL;
2352 }
2353
2354 /* Add master windows to list */
2355 for (i = 0; i < master_num; i++) {
2356 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2357 if (!master_image) {
2358 retval = -ENOMEM;
2359 goto err_master;
2360 }
2361 master_image->parent = tsi148_bridge;
2362 spin_lock_init(&master_image->lock);
2363 master_image->locked = 0;
2364 master_image->number = i;
2365 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2366 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2367 VME_USER3 | VME_USER4;
2368 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2369 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2370 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2371 VME_PROG | VME_DATA;
2372 master_image->width_attr = VME_D16 | VME_D32;
2373 memset(&master_image->bus_resource, 0,
2374 sizeof(master_image->bus_resource));
2375 master_image->kern_base = NULL;
2376 list_add_tail(&master_image->list,
2377 &tsi148_bridge->master_resources);
2378 }
2379
2380 /* Add slave windows to list */
2381 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2382 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2383 if (!slave_image) {
2384 retval = -ENOMEM;
2385 goto err_slave;
2386 }
2387 slave_image->parent = tsi148_bridge;
2388 mutex_init(&slave_image->mtx);
2389 slave_image->locked = 0;
2390 slave_image->number = i;
2391 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2392 VME_A64;
2393 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2394 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2395 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2396 VME_PROG | VME_DATA;
2397 list_add_tail(&slave_image->list,
2398 &tsi148_bridge->slave_resources);
2399 }
2400
2401 /* Add dma engines to list */
2402 for (i = 0; i < TSI148_MAX_DMA; i++) {
2403 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2404 if (!dma_ctrlr) {
2405 retval = -ENOMEM;
2406 goto err_dma;
2407 }
2408 dma_ctrlr->parent = tsi148_bridge;
2409 mutex_init(&dma_ctrlr->mtx);
2410 dma_ctrlr->locked = 0;
2411 dma_ctrlr->number = i;
2412 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2413 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2414 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2415 VME_DMA_PATTERN_TO_MEM;
2416 INIT_LIST_HEAD(&dma_ctrlr->pending);
2417 INIT_LIST_HEAD(&dma_ctrlr->running);
2418 list_add_tail(&dma_ctrlr->list,
2419 &tsi148_bridge->dma_resources);
2420 }
2421
2422 /* Add location monitor to list */
2423 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2424 if (!lm) {
2425 retval = -ENOMEM;
2426 goto err_lm;
2427 }
2428 lm->parent = tsi148_bridge;
2429 mutex_init(&lm->mtx);
2430 lm->locked = 0;
2431 lm->number = 1;
2432 lm->monitors = 4;
2433 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2434
2435 tsi148_bridge->slave_get = tsi148_slave_get;
2436 tsi148_bridge->slave_set = tsi148_slave_set;
2437 tsi148_bridge->master_get = tsi148_master_get;
2438 tsi148_bridge->master_set = tsi148_master_set;
2439 tsi148_bridge->master_read = tsi148_master_read;
2440 tsi148_bridge->master_write = tsi148_master_write;
2441 tsi148_bridge->master_rmw = tsi148_master_rmw;
2442 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2443 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2444 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2445 tsi148_bridge->irq_set = tsi148_irq_set;
2446 tsi148_bridge->irq_generate = tsi148_irq_generate;
2447 tsi148_bridge->lm_set = tsi148_lm_set;
2448 tsi148_bridge->lm_get = tsi148_lm_get;
2449 tsi148_bridge->lm_attach = tsi148_lm_attach;
2450 tsi148_bridge->lm_detach = tsi148_lm_detach;
2451 tsi148_bridge->slot_get = tsi148_slot_get;
2452 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2453 tsi148_bridge->free_consistent = tsi148_free_consistent;
2454
2455 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2456 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2457 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2458 if (!geoid)
2459 dev_info(&pdev->dev, "VME geographical address is %d\n",
2460 data & TSI148_LCSR_VSTAT_GA_M);
2461 else
2462 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2463 geoid);
2464
2465 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2466 err_chk ? "enabled" : "disabled");
2467
2468 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2469 if (retval) {
2470 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2471 goto err_crcsr;
2472 }
2473
2474 retval = vme_register_bridge(tsi148_bridge);
2475 if (retval != 0) {
2476 dev_err(&pdev->dev, "Chip Registration failed.\n");
2477 goto err_reg;
2478 }
2479
2480 pci_set_drvdata(pdev, tsi148_bridge);
2481
2482 /* Clear VME bus "board fail" and "power-up reset" lines */
2483 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2484 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2485 data |= TSI148_LCSR_VSTAT_CPURST;
2486 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2487
2488 return 0;
2489
2490 err_reg:
2491 tsi148_crcsr_exit(tsi148_bridge, pdev);
2492 err_crcsr:
2493 err_lm:
2494 /* resources are stored in linked list */
2495 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2496 lm = list_entry(pos, struct vme_lm_resource, list);
2497 list_del(pos);
2498 kfree(lm);
2499 }
2500 err_dma:
2501 /* resources are stored in linked list */
2502 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2503 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2504 list_del(pos);
2505 kfree(dma_ctrlr);
2506 }
2507 err_slave:
2508 /* resources are stored in linked list */
2509 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2510 slave_image = list_entry(pos, struct vme_slave_resource, list);
2511 list_del(pos);
2512 kfree(slave_image);
2513 }
2514 err_master:
2515 /* resources are stored in linked list */
2516 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2517 master_image = list_entry(pos, struct vme_master_resource,
2518 list);
2519 list_del(pos);
2520 kfree(master_image);
2521 }
2522
2523 tsi148_irq_exit(tsi148_bridge, pdev);
2524 err_irq:
2525 err_test:
2526 iounmap(tsi148_device->base);
2527 err_remap:
2528 pci_release_regions(pdev);
2529 err_resource:
2530 pci_disable_device(pdev);
2531 err_enable:
2532 kfree(tsi148_device);
2533 err_driver:
2534 kfree(tsi148_bridge);
2535 err_struct:
2536 return retval;
2537
2538 }
2539
2540 static void tsi148_remove(struct pci_dev *pdev)
2541 {
2542 struct list_head *pos = NULL;
2543 struct list_head *tmplist;
2544 struct vme_master_resource *master_image;
2545 struct vme_slave_resource *slave_image;
2546 struct vme_dma_resource *dma_ctrlr;
2547 int i;
2548 struct tsi148_driver *bridge;
2549 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2550
2551 bridge = tsi148_bridge->driver_priv;
2552
2553
2554 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2555
2556 /*
2557 * Shutdown all inbound and outbound windows.
2558 */
2559 for (i = 0; i < 8; i++) {
2560 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2561 TSI148_LCSR_OFFSET_ITAT);
2562 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2563 TSI148_LCSR_OFFSET_OTAT);
2564 }
2565
2566 /*
2567 * Shutdown Location monitor.
2568 */
2569 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2570
2571 /*
2572 * Shutdown CRG map.
2573 */
2574 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2575
2576 /*
2577 * Clear error status.
2578 */
2579 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2580 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2581 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2582
2583 /*
2584 * Remove VIRQ interrupt (if any)
2585 */
2586 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2587 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2588
2589 /*
2590 * Map all Interrupts to PCI INTA
2591 */
2592 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2593 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2594
2595 tsi148_irq_exit(tsi148_bridge, pdev);
2596
2597 vme_unregister_bridge(tsi148_bridge);
2598
2599 tsi148_crcsr_exit(tsi148_bridge, pdev);
2600
2601 /* resources are stored in linked list */
2602 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2603 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2604 list_del(pos);
2605 kfree(dma_ctrlr);
2606 }
2607
2608 /* resources are stored in linked list */
2609 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2610 slave_image = list_entry(pos, struct vme_slave_resource, list);
2611 list_del(pos);
2612 kfree(slave_image);
2613 }
2614
2615 /* resources are stored in linked list */
2616 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2617 master_image = list_entry(pos, struct vme_master_resource,
2618 list);
2619 list_del(pos);
2620 kfree(master_image);
2621 }
2622
2623 iounmap(bridge->base);
2624
2625 pci_release_regions(pdev);
2626
2627 pci_disable_device(pdev);
2628
2629 kfree(tsi148_bridge->driver_priv);
2630
2631 kfree(tsi148_bridge);
2632 }
2633
2634 module_pci_driver(tsi148_driver);
2635
2636 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2637 module_param(err_chk, bool, 0);
2638
2639 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2640 module_param(geoid, int, 0);
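
/*
 * Example (hypothetical) module load using both parameters:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=4
 *
 * err_chk enables the write flush/error-check window, geoid overrides the
 * slot number that would otherwise be read from the geographical address
 * pins.
 */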
2641
2642 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2643 MODULE_LICENSE("GPL");
2644