// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/mite.c
 * Hardware driver for NI Mite PCI interface chip
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
 */

/*
 * The PCI-MIO E series driver was originally written by
 * Tomasz Motylewski <...>, and ported to comedi by ds.
 *
 * References for specifications:
 *
 *    321747b.pdf  Register Level Programmer Manual (obsolete)
 *    321747c.pdf  Register Level Programmer Manual (new)
 *    DAQ-STC reference manual
 *
 * Other possibly relevant info:
 *
 *    320517c.pdf  User manual (obsolete)
 *    320517f.pdf  User manual (new)
 *    320889a.pdf  delete
 *    320906c.pdf  maximum signal ratings
 *    321066a.pdf  about 16x
 *    321791a.pdf  discontinuation of at-mio-16e-10 rev. c
 *    321808a.pdf  about at-mio-16e-10 rev P
 *    321837a.pdf  discontinuation of at-mio-16de-10 rev d
 *    321838a.pdf  about at-mio-16de-10 rev N
 *
 * ISSUES:
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "../comedi_pci.h"

#include "mite.h"

/*
 * Mite registers
 */
#define MITE_UNKNOWN_DMA_BURST_REG	0x28
#define UNKNOWN_DMA_BURST_ENABLE_BITS	0x600

#define MITE_PCI_CONFIG_OFFSET	0x300
#define MITE_CSIGR		0x460			/* chip signature */
#define CSIGR_TO_IOWINS(x)	(((x) >> 29) & 0x7)
#define CSIGR_TO_WINS(x)	(((x) >> 24) & 0x1f)
#define CSIGR_TO_WPDEP(x)	(((x) >> 20) & 0x7)
#define CSIGR_TO_DMAC(x)	(((x) >> 16) & 0xf)
#define CSIGR_TO_IMODE(x)	(((x) >> 12) & 0x3)	/* pci=0x3 */
#define CSIGR_TO_MMODE(x)	(((x) >> 8) & 0x3)	/* minimite=1 */
#define CSIGR_TO_TYPE(x)	(((x) >> 4) & 0xf)	/* mite=0, minimite=1 */
#define CSIGR_TO_VER(x)		(((x) >> 0) & 0xf)

#define MITE_CHAN(x)		(0x500 + 0x100 * (x))
#define MITE_CHOR(x)		(0x00 + MITE_CHAN(x))	/* channel operation */
#define CHOR_DMARESET		BIT(31)
#define CHOR_SET_SEND_TC	BIT(11)
#define CHOR_CLR_SEND_TC	BIT(10)
#define CHOR_SET_LPAUSE		BIT(9)
#define CHOR_CLR_LPAUSE		BIT(8)
#define CHOR_CLRDONE		BIT(7)
#define CHOR_CLRRB		BIT(6)
#define CHOR_CLRLC		BIT(5)
#define CHOR_FRESET		BIT(4)
#define CHOR_ABORT		BIT(3)	/* stop without emptying fifo */
#define CHOR_STOP		BIT(2)	/* stop after emptying fifo */
#define CHOR_CONT		BIT(1)
#define CHOR_START		BIT(0)
#define MITE_CHCR(x)		(0x04 + MITE_CHAN(x))	/* channel control */
#define CHCR_SET_DMA_IE		BIT(31)
#define CHCR_CLR_DMA_IE		BIT(30)
#define CHCR_SET_LINKP_IE	BIT(29)
#define CHCR_CLR_LINKP_IE	BIT(28)
#define CHCR_SET_SAR_IE		BIT(27)
#define CHCR_CLR_SAR_IE		BIT(26)
#define CHCR_SET_DONE_IE	BIT(25)
#define CHCR_CLR_DONE_IE	BIT(24)
#define CHCR_SET_MRDY_IE	BIT(23)
#define CHCR_CLR_MRDY_IE	BIT(22)
#define CHCR_SET_DRDY_IE	BIT(21)
#define CHCR_CLR_DRDY_IE	BIT(20)
#define CHCR_SET_LC_IE		BIT(19)
#define CHCR_CLR_LC_IE		BIT(18)
#define CHCR_SET_CONT_RB_IE	BIT(17)
#define CHCR_CLR_CONT_RB_IE	BIT(16)
#define CHCR_FIFO(x)		(((x) & 0x1) << 15)
#define CHCR_FIFODIS		CHCR_FIFO(1)
#define CHCR_FIFO_ON		CHCR_FIFO(0)
#define CHCR_BURST(x)		(((x) & 0x1) << 14)
#define CHCR_BURSTEN		CHCR_BURST(1)
#define CHCR_NO_BURSTEN		CHCR_BURST(0)
#define CHCR_BYTE_SWAP_DEVICE	BIT(6)
#define CHCR_BYTE_SWAP_MEMORY	BIT(4)
#define CHCR_DIR(x)		(((x) & 0x1) << 3)
#define CHCR_DEV_TO_MEM		CHCR_DIR(1)
#define CHCR_MEM_TO_DEV		CHCR_DIR(0)
#define CHCR_MODE(x)		(((x) & 0x7) << 0)
#define CHCR_NORMAL		CHCR_MODE(0)
#define CHCR_CONTINUE		CHCR_MODE(1)
#define CHCR_RINGBUFF		CHCR_MODE(2)
#define CHCR_LINKSHORT		CHCR_MODE(4)
#define CHCR_LINKLONG		CHCR_MODE(5)
#define MITE_TCR(x)		(0x08 + MITE_CHAN(x))	/* transfer count */
#define MITE_MCR(x)		(0x0c + MITE_CHAN(x))	/* memory config */
#define MITE_MAR(x)		(0x10 + MITE_CHAN(x))	/* memory address */
#define MITE_DCR(x)		(0x14 + MITE_CHAN(x))	/* device config */
#define DCR_NORMAL		BIT(29)
#define MITE_DAR(x)		(0x18 + MITE_CHAN(x))	/* device address */
#define MITE_LKCR(x)		(0x1c + MITE_CHAN(x))	/* link config */
#define MITE_LKAR(x)		(0x20 + MITE_CHAN(x))	/* link address */
#define MITE_LLKAR(x)		(0x24 + MITE_CHAN(x))	/* see tnt5002 manual */
#define MITE_BAR(x)		(0x28 + MITE_CHAN(x))	/* base address */
#define MITE_BCR(x)		(0x2c + MITE_CHAN(x))	/* base count */
#define MITE_SAR(x)		(0x30 + MITE_CHAN(x))	/* ? address */
#define MITE_WSCR(x)		(0x34 + MITE_CHAN(x))	/* ? */
#define MITE_WSER(x)		(0x38 + MITE_CHAN(x))	/* ? */
#define MITE_CHSR(x)		(0x3c + MITE_CHAN(x))	/* channel status */
#define CHSR_INT		BIT(31)
#define CHSR_LPAUSES		BIT(29)
#define CHSR_SARS		BIT(27)
#define CHSR_DONE		BIT(25)
#define CHSR_MRDY		BIT(23)
#define CHSR_DRDY		BIT(21)
#define CHSR_LINKC		BIT(19)
#define CHSR_CONTS_RB		BIT(17)
#define CHSR_ERROR		BIT(15)
#define CHSR_SABORT		BIT(14)
#define CHSR_HABORT		BIT(13)
#define CHSR_STOPS		BIT(12)
#define CHSR_OPERR(x)		(((x) & 0x3) << 10)
#define CHSR_OPERR_MASK		CHSR_OPERR(3)
#define CHSR_OPERR_NOERROR	CHSR_OPERR(0)
#define CHSR_OPERR_FIFOERROR	CHSR_OPERR(1)
#define CHSR_OPERR_LINKERROR	CHSR_OPERR(1)	/* ??? */
#define CHSR_XFERR		BIT(9)
#define CHSR_END		BIT(8)
#define CHSR_DRQ1		BIT(7)
#define CHSR_DRQ0		BIT(6)
#define CHSR_LERR(x)		(((x) & 0x3) << 4)
#define CHSR_LERR_MASK		CHSR_LERR(3)
#define CHSR_LBERR		CHSR_LERR(1)
#define CHSR_LRERR		CHSR_LERR(2)
#define CHSR_LOERR		CHSR_LERR(3)
#define CHSR_MERR(x)		(((x) & 0x3) << 2)
#define CHSR_MERR_MASK		CHSR_MERR(3)
#define CHSR_MBERR		CHSR_MERR(1)
#define CHSR_MRERR		CHSR_MERR(2)
#define CHSR_MOERR		CHSR_MERR(3)
#define CHSR_DERR(x)		(((x) & 0x3) << 0)
#define CHSR_DERR_MASK		CHSR_DERR(3)
#define CHSR_DBERR		CHSR_DERR(1)
#define CHSR_DRERR		CHSR_DERR(2)
#define CHSR_DOERR		CHSR_DERR(3)
#define MITE_FCR(x)		(0x40 + MITE_CHAN(x))	/* fifo count */

/* common bits for the memory/device/link config registers */
#define CR_RL(x)		(((x) & 0x7) << 21)
#define CR_REQS(x)		(((x) & 0x7) << 16)
#define CR_REQS_MASK		CR_REQS(7)
#define CR_ASEQ(x)		(((x) & 0x3) << 10)
#define CR_ASEQDONT		CR_ASEQ(0)
#define CR_ASEQUP		CR_ASEQ(1)
#define CR_ASEQDOWN		CR_ASEQ(2)
#define CR_ASEQ_MASK		CR_ASEQ(3)
#define CR_PSIZE(x)		(((x) & 0x3) << 8)
#define CR_PSIZE8		CR_PSIZE(1)
#define CR_PSIZE16		CR_PSIZE(2)
#define CR_PSIZE32		CR_PSIZE(3)
#define CR_PORT(x)		(((x) & 0x3) << 6)
#define CR_PORTCPU		CR_PORT(0)
#define CR_PORTIO		CR_PORT(1)
#define CR_PORTVXI		CR_PORT(2)
#define CR_PORTMXI		CR_PORT(3)
#define CR_AMDEVICE		BIT(0)

static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
{
	return (ilog2(size) - 1) & 0x1f;
}

static unsigned int mite_retry_limit(unsigned int retry_limit)
{
	unsigned int value = 0;

	if (retry_limit)
		value = 1 + ilog2(retry_limit);
	if (value > 0x7)
		value = 0x7;
	return CR_RL(value);
}

static unsigned int mite_drq_reqs(unsigned int drq_line)
{
	/* This also works on m-series when using channels (drq_line) 4 or 5. */
	return CR_REQS((drq_line & 0x3) | 0x4);
}

static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
	unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
	unsigned int empty_count = (fcr_bits >> 16) & 0xff;
	unsigned int full_count = fcr_bits & 0xff;

	return empty_count + full_count;
}

static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	return readl(mite->mmio + MITE_DAR(mite_chan->channel));
}

/**
 * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
 * @mite_chan: MITE dma channel.
 */
u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);

/* returns lower bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count - mite_bytes_in_transit(mite_chan);
}

/* returns upper bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}

/* returns lower bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count + mite_bytes_in_transit(mite_chan);
}

/* returns upper bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
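
/*
 * Note on the _lb/_ub helper pairs above: the device byte count (DAR) and
 * the FIFO occupancy (FCR) cannot be sampled atomically, so each pair simply
 * reads the two registers in opposite order.  Reading the device byte count
 * first yields the conservative (lower) estimate; reading the FIFO count
 * first yields the optimistic (upper) estimate.
 */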

static void mite_sync_input_dma(struct mite_channel *mite_chan,
				struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	int count;
	unsigned int nbytes, old_alloc_count;

	old_alloc_count = async->buf_write_alloc_count;
	/* write alloc as much as we can */
	comedi_buf_write_alloc(s, async->prealloc_bufsz);

	nbytes = mite_bytes_written_to_memory_lb(mite_chan);
	if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
		  old_alloc_count) > 0) {
		dev_warn(s->device->class_dev,
			 "mite: DMA overwrite of free area\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return;
	}

	count = nbytes - async->buf_write_count;
	/*
	 * it's possible count will be negative due to conservative value
	 * returned by mite_bytes_written_to_memory_lb
	 */
	if (count > 0) {
		comedi_buf_write_free(s, count);
		comedi_inc_scan_progress(s, count);
		async->events |= COMEDI_CB_BLOCK;
	}
}

static void mite_sync_output_dma(struct mite_channel *mite_chan,
				 struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
	unsigned int old_alloc_count = async->buf_read_alloc_count;
	u32 nbytes_ub, nbytes_lb;
	int count;
	bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);

	/* read alloc as much as we can */
	comedi_buf_read_alloc(s, async->prealloc_bufsz);
	nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
		nbytes_lb = stop_count;
	nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
		nbytes_ub = stop_count;

	if ((!finite_regen || stop_count > old_alloc_count) &&
	    ((int)(nbytes_ub - old_alloc_count) > 0)) {
		dev_warn(s->device->class_dev, "mite: DMA underrun\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return;
	}

	if (finite_regen) {
		/*
		 * This is a special case where we continuously output a finite
		 * buffer.  In this case, we do not free any of the memory,
		 * hence we expect that old_alloc_count will reach a maximum of
		 * stop_count bytes.
		 */
		return;
	}

	count = nbytes_lb - async->buf_read_count;
	if (count > 0) {
		comedi_buf_read_free(s, count);
		async->events |= COMEDI_CB_BLOCK;
	}
}

/**
 * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
 * @mite_chan: MITE dma channel.
 * @s: COMEDI subdevice.
 */
void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
{
	if (mite_chan->dir == COMEDI_INPUT)
		mite_sync_input_dma(mite_chan, s);
	else
		mite_sync_output_dma(mite_chan, s);
}
EXPORT_SYMBOL_GPL(mite_sync_dma);

static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		writel(CHOR_CLRDONE,
		       mite->mmio + MITE_CHOR(mite_chan->channel));
	}
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}

/**
 * mite_ack_linkc() - Check and ack the LINKC interrupt.
 * @mite_chan: MITE dma channel.
 * @s: COMEDI subdevice.
 * @sync: flag to force a mite_sync_dma().
 *
 * This will also ack the DONE interrupt if active.
 */
void mite_ack_linkc(struct mite_channel *mite_chan,
		    struct comedi_subdevice *s,
		    bool sync)
{
	struct mite *mite = mite_chan->mite;
	unsigned int status;

	status = mite_get_status(mite_chan);
	if (status & CHSR_LINKC) {
		writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
		sync = true;
	}
	if (sync)
		mite_sync_dma(mite_chan, s);

	if (status & CHSR_XFERR) {
		dev_err(s->device->class_dev,
			"mite: transfer error %08x\n", status);
		s->async->events |= COMEDI_CB_ERROR;
	}
}
EXPORT_SYMBOL_GPL(mite_ack_linkc);
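
/*
 * Illustrative sketch (not part of this driver): a board driver's interrupt
 * handler typically lets mite_ack_linkc() ack LINKC and sync the async
 * buffer for the active DMA channel.  The board_private/devpriv member names
 * and the locking shown here are hypothetical, following the usual NI
 * board-driver pattern.
 *
 *	static irqreturn_t board_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct board_private *devpriv = dev->private;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
 *		if (devpriv->ai_mite_chan)
 *			mite_ack_linkc(devpriv->ai_mite_chan, s, false);
 *		spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
 *
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */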

/**
 * mite_done() - Check if a MITE dma transfer is complete.
 * @mite_chan: MITE dma channel.
 *
 * This will also ack the DONE interrupt if active.
 */
int mite_done(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;
	int done;

	mite_get_status(mite_chan);
	spin_lock_irqsave(&mite->lock, flags);
	done = mite_chan->done;
	spin_unlock_irqrestore(&mite->lock, flags);
	return done;
}
EXPORT_SYMBOL_GPL(mite_done);
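
/*
 * Illustrative sketch (not part of this driver): a driver that must wait for
 * the final link to finish before draining its FIFO can poll mite_done()
 * with a bounded busy-wait.  The channel pointer, loop counter and timeout
 * values below are hypothetical.
 *
 *	for (i = 0; i < 100; i++) {
 *		if (mite_done(devpriv->ai_mite_chan))
 *			break;
 *		udelay(5);
 *	}
 */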

static void mite_dma_reset(struct mite_channel *mite_chan)
{
	writel(CHOR_DMARESET | CHOR_FRESET,
	       mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
}

/**
 * mite_dma_arm() - Start a MITE dma transfer.
 * @mite_chan: MITE dma channel.
 */
void mite_dma_arm(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;

	/*
	 * memory barrier is intended to ensure any twiddling with the buffer
	 * is done before writing to the mite to arm the dma transfer
	 */
	smp_mb();
	spin_lock_irqsave(&mite->lock, flags);
	mite_chan->done = 0;
	/* arm */
	writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);

/**
 * mite_dma_disarm() - Stop a MITE dma transfer.
 * @mite_chan: MITE dma channel.
 */
void mite_dma_disarm(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	/* disarm */
	writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);

/**
 * mite_prep_dma() - Prepare a MITE dma channel for transfers.
 * @mite_chan: MITE dma channel.
 * @num_device_bits: device transfer size (8, 16, or 32-bits).
 * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
 */
void mite_prep_dma(struct mite_channel *mite_chan,
		   unsigned int num_device_bits, unsigned int num_memory_bits)
{
	struct mite *mite = mite_chan->mite;
	unsigned int chcr, mcr, dcr, lkcr;

	mite_dma_reset(mite_chan);

	/* short link chaining mode */
	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
	    CHCR_BURSTEN;
	/*
	 * Link Complete Interrupt: interrupt every time a link
	 * in MITE_RING is completed. This can generate a lot of
	 * extra interrupts, but right now we update the values
	 * of buf_int_ptr and buf_int_count at each interrupt. A
	 * better method is to poll the MITE before each user
	 * "read()" to calculate the number of bytes available.
	 */
	chcr |= CHCR_SET_LC_IE;
	if (num_memory_bits == 32 && num_device_bits == 16) {
		/*
		 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
		 * samples into the fifo in the right order. Tested doing 32 bit
		 * memory to 16 bit device transfers to the analog out of a
		 * pxi-6281, which has mite version = 1, type = 4. This also
		 * works for dma reads from the counters on e-series boards.
		 */
		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
	}
	if (mite_chan->dir == COMEDI_INPUT)
		chcr |= CHCR_DEV_TO_MEM;

	writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));

	/* to/from memory */
	mcr = mite_retry_limit(64) | CR_ASEQUP;
	switch (num_memory_bits) {
	case 8:
		mcr |= CR_PSIZE8;
		break;
	case 16:
		mcr |= CR_PSIZE16;
		break;
	case 32:
		mcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid mem bit width for dma transfer\n");
		break;
	}
	writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));

	/* from/to device */
	dcr = mite_retry_limit(64) | CR_ASEQUP;
	dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
	switch (num_device_bits) {
	case 8:
		dcr |= CR_PSIZE8;
		break;
	case 16:
		dcr |= CR_PSIZE16;
		break;
	case 32:
		dcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid dev bit width for dma transfer\n");
		break;
	}
	writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));

	/* reset the DAR */
	writel(0, mite->mmio + MITE_DAR(mite_chan->channel));

	/* the link is 32bits */
	lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
	writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));

	/* starting address for link chaining */
	writel(mite_chan->ring->dma_addr,
	       mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);

static struct mite_channel *__mite_request_channel(struct mite *mite,
						   struct mite_ring *ring,
						   unsigned int min_channel,
						   unsigned int max_channel)
{
	struct mite_channel *mite_chan = NULL;
	unsigned long flags;
	int i;

	/*
	 * spin lock so mite_release_channel can be called safely
	 * from interrupts
	 */
	spin_lock_irqsave(&mite->lock, flags);
	for (i = min_channel; i <= max_channel; ++i) {
		mite_chan = &mite->channels[i];
		if (!mite_chan->ring) {
			mite_chan->ring = ring;
			break;
		}
		mite_chan = NULL;
	}
	spin_unlock_irqrestore(&mite->lock, flags);
	return mite_chan;
}

/**
 * mite_request_channel_in_range() - Request a MITE dma channel.
 * @mite: MITE device.
 * @ring: MITE dma ring.
 * @min_channel: minimum channel index to use.
 * @max_channel: maximum channel index to use.
 */
struct mite_channel *mite_request_channel_in_range(struct mite *mite,
						   struct mite_ring *ring,
						   unsigned int min_channel,
						   unsigned int max_channel)
{
	return __mite_request_channel(mite, ring, min_channel, max_channel);
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);

/**
 * mite_request_channel() - Request a MITE dma channel.
 * @mite: MITE device.
 * @ring: MITE dma ring.
 */
struct mite_channel *mite_request_channel(struct mite *mite,
					  struct mite_ring *ring)
{
	return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
}
EXPORT_SYMBOL_GPL(mite_request_channel);
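
/*
 * Illustrative sketch (not part of this driver): the usual sequence a board
 * driver uses to start DMA for an analog-input command, assuming a
 * previously allocated ring.  The devpriv member names are hypothetical and
 * the transfer widths depend on the hardware.
 *
 *	devpriv->ai_mite_chan = mite_request_channel(devpriv->mite,
 *						     devpriv->ai_mite_ring);
 *	if (!devpriv->ai_mite_chan)
 *		return -EBUSY;
 *	devpriv->ai_mite_chan->dir = COMEDI_INPUT;
 *	mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
 *	mite_dma_arm(devpriv->ai_mite_chan);
 */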

/**
 * mite_release_channel() - Release a MITE dma channel.
 * @mite_chan: MITE dma channel.
 */
void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;

	/* spin lock to prevent races with mite_request_channel */
	spin_lock_irqsave(&mite->lock, flags);
	if (mite_chan->ring) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/*
		 * disable all of the channel's interrupts (do it after
		 * disarm/reset so the MITE_CHCR reg isn't changed while dma
		 * is still active!)
		 */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mmio + MITE_CHCR(mite_chan->channel));
		mite_chan->ring = NULL;
		mmiowb();
	}
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);

/**
 * mite_init_ring_descriptors() - Initialize a MITE dma ring's descriptors.
 * @ring: MITE dma ring.
 * @s: COMEDI subdevice.
 * @nbytes: the size of the dma ring (in bytes).
 *
 * Initializes the ring buffer descriptors to provide correct DMA transfer
 * links to the exact amount of memory required. When the ring buffer is
 * allocated by mite_buf_change(), the default is to initialize the ring
 * to refer to the entire DMA data buffer. A command may call this function
 * later to re-initialize and shorten the amount of memory that will be
 * transferred.
 */
int mite_init_ring_descriptors(struct mite_ring *ring,
			       struct comedi_subdevice *s,
			       unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	struct mite_dma_desc *desc = NULL;
	unsigned int n_full_links = nbytes >> PAGE_SHIFT;
	unsigned int remainder = nbytes % PAGE_SIZE;
	int i;

	dev_dbg(s->device->class_dev,
		"mite: init ring buffer to %u bytes\n", nbytes);

	if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
		dev_err(s->device->class_dev,
			"mite: ring buffer too small for requested init\n");
		return -ENOMEM;
	}

	/* We set the descriptors for all full links. */
	for (i = 0; i < n_full_links; ++i) {
		desc = &ring->descs[i];
		desc->count = cpu_to_le32(PAGE_SIZE);
		desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
		desc->next = cpu_to_le32(ring->dma_addr +
					 (i + 1) * sizeof(*desc));
	}

	/* the last link is either a remainder or was a full link. */
	if (remainder > 0) {
		desc = &ring->descs[i];
		/* set the lesser count for the remainder link */
		desc->count = cpu_to_le32(remainder);
		desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
	}

	/* Assign the last link->next to point back to the head of the list. */
	desc->next = cpu_to_le32(ring->dma_addr);

	/*
	 * barrier is meant to ensure that all the writes to the dma descriptors
	 * have completed before the dma controller is commanded to read them
	 */
	smp_wmb();
	return 0;
}
EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
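
/*
 * Illustrative sketch (not part of this driver): a finite output command can
 * shrink the ring so that only the bytes actually to be transferred are
 * linked in.  The cmd, s and devpriv->ao_mite_ring names are hypothetical.
 *
 *	unsigned int nbytes = cmd->stop_arg * comedi_bytes_per_scan(s);
 *	int ret = mite_init_ring_descriptors(devpriv->ao_mite_ring, s, nbytes);
 *
 *	if (ret < 0)
 *		return ret;
 */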

static void mite_free_dma_descs(struct mite_ring *ring)
{
	struct mite_dma_desc *descs = ring->descs;

	if (descs) {
		dma_free_coherent(ring->hw_dev,
				  ring->n_links * sizeof(*descs),
				  descs, ring->dma_addr);
		ring->descs = NULL;
		ring->dma_addr = 0;
		ring->n_links = 0;
	}
}

/**
 * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
 * @ring: MITE dma ring.
 * @s: COMEDI subdevice.
 */
int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct mite_dma_desc *descs;
	unsigned int n_links;

	mite_free_dma_descs(ring);

	if (async->prealloc_bufsz == 0)
		return 0;

	n_links = async->prealloc_bufsz >> PAGE_SHIFT;

	descs = dma_alloc_coherent(ring->hw_dev,
				   n_links * sizeof(*descs),
				   &ring->dma_addr, GFP_KERNEL);
	if (!descs) {
		dev_err(s->device->class_dev,
			"mite: ring buffer allocation failed\n");
		return -ENOMEM;
	}
	ring->descs = descs;
	ring->n_links = n_links;

	return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(mite_buf_change);
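
/*
 * Illustrative sketch (not part of this driver): a subdevice (*buf_change)
 * hook usually just forwards to mite_buf_change() for the ring tied to that
 * subdevice.  The board_private/ai_mite_ring names are hypothetical.
 *
 *	static int board_ai_buf_change(struct comedi_device *dev,
 *				       struct comedi_subdevice *s)
 *	{
 *		struct board_private *devpriv = dev->private;
 *
 *		return mite_buf_change(devpriv->ai_mite_ring, s);
 *	}
 */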

/**
 * mite_alloc_ring() - Allocate a MITE dma ring.
 * @mite: MITE device.
 */
struct mite_ring *mite_alloc_ring(struct mite *mite)
{
	struct mite_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->hw_dev = get_device(&mite->pcidev->dev);
	if (!ring->hw_dev) {
		kfree(ring);
		return NULL;
	}
	ring->n_links = 0;
	ring->descs = NULL;
	ring->dma_addr = 0;
	return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);

/**
 * mite_free_ring() - Free a MITE dma ring and its descriptors.
 * @ring: MITE dma ring.
 */
void mite_free_ring(struct mite_ring *ring)
{
	if (ring) {
		mite_free_dma_descs(ring);
		put_device(ring->hw_dev);
		kfree(ring);
	}
}
EXPORT_SYMBOL_GPL(mite_free_ring);

static int mite_setup(struct comedi_device *dev, struct mite *mite,
		      bool use_win1)
{
	resource_size_t daq_phys_addr;
	unsigned long length;
	int i;
	u32 csigr_bits;
	unsigned int unknown_dma_burst_bits;
	unsigned int wpdep;

	pci_set_master(mite->pcidev);

	mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
	if (!mite->mmio)
		return -ENOMEM;

	dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
	if (!dev->mmio)
		return -ENOMEM;
	daq_phys_addr = pci_resource_start(mite->pcidev, 1);
	length = pci_resource_len(mite->pcidev, 1);

	if (use_win1) {
		writel(0, mite->mmio + MITE_IODWBSR);
		dev_dbg(dev->class_dev,
			"mite: using I/O Window Base Size register 1\n");
		writel(daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mmio + MITE_IODWBSR_1);
		writel(0, mite->mmio + MITE_IODWCR_1);
	} else {
		writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
	}
	/*
	 * Make sure dma bursts work. I got this from running a bus analyzer
	 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
	 * of 0x61f and bursts worked. 6281 powered up with register value of
	 * 0x1f and bursts didn't work. The NI windows driver reads the
	 * register, then does a bitwise-or of 0x600 with it and writes it back.
	 *
	 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
	 * written and read back.  The bits 0x1f always read as 1.
	 * The rest always read as zero.
	 */
	unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);

	csigr_bits = readl(mite->mmio + MITE_CSIGR);
	mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		dev_warn(dev->class_dev,
			 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
			 mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}

	/* get the wpdep bits and convert them to the write port fifo depth */
	wpdep = CSIGR_TO_WPDEP(csigr_bits);
	if (wpdep)
		wpdep = BIT(wpdep);

	dev_dbg(dev->class_dev,
		"mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
		CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
		CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
	dev_dbg(dev->class_dev,
		"mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
		CSIGR_TO_DMAC(csigr_bits), wpdep,
		CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));

	for (i = 0; i < mite->num_channels; i++) {
		writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
		/* disable interrupts */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mmio + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
	return 0;
}

/**
 * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
 * @dev: COMEDI device.
 * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
 *
 * Called by a COMEDI driver's (*auto_attach).
 *
 * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
 * be allocated or remapped.
 */
struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	struct mite *mite;
	unsigned int i;
	int ret;

	mite = kzalloc(sizeof(*mite), GFP_KERNEL);
	if (!mite)
		return NULL;

	spin_lock_init(&mite->lock);
	mite->pcidev = pcidev;
	for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
		mite->channels[i].mite = mite;
		mite->channels[i].channel = i;
		mite->channels[i].done = 1;
	}

	ret = mite_setup(dev, mite, use_win1);
	if (ret) {
		if (mite->mmio)
			iounmap(mite->mmio);
		kfree(mite);
		return NULL;
	}

	return mite;
}
EXPORT_SYMBOL_GPL(mite_attach);
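
/*
 * Illustrative sketch (not part of this driver): typical (*auto_attach)
 * usage, pairing mite_attach() with ring allocation.  The devpriv member
 * names are hypothetical.
 *
 *	devpriv->mite = mite_attach(dev, false);
 *	if (!devpriv->mite)
 *		return -ENOMEM;
 *
 *	devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
 *	if (!devpriv->ai_mite_ring)
 *		return -ENOMEM;
 */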

/**
 * mite_detach() - Unmap and free a MITE device for a comedi driver.
 * @mite: MITE device.
 *
 * Called by a COMEDI driver's (*detach).
 */
void mite_detach(struct mite *mite)
{
	if (!mite)
		return;

	if (mite->mmio)
		iounmap(mite->mmio);

	kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);
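
/*
 * Illustrative sketch (not part of this driver): the matching (*detach) side
 * releases the rings before the MITE itself (hypothetical names again).
 *
 *	mite_free_ring(devpriv->ai_mite_ring);
 *	mite_detach(devpriv->mite);
 */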

static int __init mite_module_init(void)
{
	return 0;
}
module_init(mite_module_init);

static void __exit mite_module_exit(void)
{
}
module_exit(mite_module_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
MODULE_LICENSE("GPL");