// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		for (i = 0; i < bm->n_pages; i++) {
			buf = &bm->page_list[i];
			clear_bit(PG_reserved,
				  &(virt_to_page(buf->virt_addr)->flags));
			if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
				dma_free_coherent(bm->dma_hw_dev,
						  PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
#endif
			} else {
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
	if (!bm)
		return;

	kref_init(&bm->refcount);
	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	bm->dma_dir = s->async_dma_dir;
	if (bm->dma_dir != DMA_NONE)
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (bm->page_list)
		pages = vmalloc(sizeof(struct page *) * n_pages);

	if (!pages)
		return;

	for (i = 0; i < n_pages; i++) {
		buf = &bm->page_list[i];
		if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
			buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
							    PAGE_SIZE,
							    &buf->dma_addr,
							    GFP_KERNEL |
							    __GFP_COMP);
#else
			break;
#endif
		else
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
		if (!buf->virt_addr)
			break;

		set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

		pages[i] = virt_to_page(buf->virt_addr);
	}
	spin_lock_irqsave(&s->spin_lock, flags);
	bm->n_pages = i;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	/* vmap the prealloc_buf if all the pages were allocated */
	if (i == n_pages)
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

	vfree(pages);
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}
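
/*
 * Example (sketch): comedi_buf_map_access() is meant to back the "access"
 * method of the vm_operations_struct used when the buffer is mmapped, so
 * that debuggers can peek and poke the mapping.  The handler below is only
 * illustrative (my_vm_access and its bounds handling are assumptions, not
 * taken from this file):
 *
 *	static int my_vm_access(struct vm_area_struct *vma, unsigned long addr,
 *				void *buf, int len, int write)
 *	{
 *		struct comedi_buf_map *bm = vma->vm_private_data;
 *		unsigned long offset =
 *			addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
 *
 *		if (len > vma->vm_end - addr)
 *			len = vma->vm_end - addr;
 *		return comedi_buf_map_access(bm, offset, buf, len, write);
 *	}
 */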

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}
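
/*
 * Example (sketch): the reference taken here keeps the page list alive while
 * it is used outside of s->spin_lock, e.g. while an mmap() request is being
 * set up.  Illustrative only:
 *
 *	struct comedi_buf_map *bm = comedi_buf_map_from_subdev_get(s);
 *
 *	if (!bm)
 *		return -EINVAL;
 *	... use bm->page_list[] and bm->n_pages ...
 *	comedi_buf_map_put(bm);
 */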

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * munging is applied to the data by the core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		count = num_bytes;
	} else {
		/* don't munge partial samples */
		num_bytes -= num_bytes % num_sample_bytes;
		while (count < num_bytes) {
			int block_size = num_bytes - count;
			unsigned int buf_end;

			buf_end = async->prealloc_bufsz - async->munge_ptr;
			if (block_size > buf_end)
				block_size = buf_end;

			s->munge(s->device, s,
				 async->prealloc_buf + async->munge_ptr,
				 block_size, async->munge_chan);

			/*
			 * ensure data is munged in buffer before the
			 * async buffer munge_count is incremented
			 */
			smp_wmb();

			async->munge_chan += block_size / num_sample_bytes;
			async->munge_chan %= async->cmd.chanlist_len;
			async->munge_count += block_size;
			async->munge_ptr += block_size;
			async->munge_ptr %= async->prealloc_bufsz;
			count += block_size;
		}
	}

	return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount of
 * space freed is limited to the amount that was reserved.  The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here.  The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
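
/*
 * Example (sketch): a driver that lets hardware DMA directly into the COMEDI
 * buffer typically reserves space before starting the transfer and frees
 * (commits) it once the transfer completes, so the core can munge the data
 * and hand it to the reader.  Illustrative only (transfer_size is a
 * placeholder); most PIO drivers use comedi_buf_write_samples() instead:
 *
 *	unsigned int n = comedi_buf_write_alloc(s, transfer_size);
 *
 *	... start a DMA transfer of n bytes into the buffer at
 *	    s->async->buf_write_ptr (taking wrap-around into account) ...
 *	... when the transfer completes ...
 *	comedi_buf_write_free(s, n);
 *	s->async->events |= COMEDI_CB_BLOCK;
 *	comedi_handle_events(dev, s);
 */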

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice.  The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the space available.  The
 * reader can read from the reserved space and then free it.  A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
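
/*
 * Example (sketch): a driver that feeds an analog output FIFO by DMA from
 * the COMEDI buffer reserves the readable data first and frees it once the
 * hardware has consumed it.  Illustrative only; most PIO drivers use
 * comedi_buf_read_samples() instead:
 *
 *	unsigned int n = comedi_buf_read_n_available(s);
 *
 *	n = comedi_buf_read_alloc(s, n);
 *	... copy n bytes starting at s->async->buf_read_ptr to the hardware
 *	    (taking wrap-around into account) ...
 *	comedi_buf_read_free(s, n);
 *	s->async->events |= COMEDI_CB_BLOCK;
 *	comedi_handle_events(dev, s);
 */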

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		unsigned int block_size;

		if (write_ptr + num_bytes > async->prealloc_bufsz)
			block_size = async->prealloc_bufsz - write_ptr;
		else
			block_size = num_bytes;

		memcpy(async->prealloc_buf + write_ptr, data, block_size);

		data += block_size;
		num_bytes -= block_size;

		write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	void *src;
	struct comedi_async *async = s->async;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		unsigned int block_size;

		src = async->prealloc_buf + read_ptr;

		if (nbytes >= async->prealloc_bufsz - read_ptr)
			block_size = async->prealloc_bufsz - read_ptr;
		else
			block_size = nbytes;

		memcpy(dest, src, block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr = 0;
	}
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
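
/*
 * Example (sketch): an analog input driver's interrupt handler typically
 * drains the hardware FIFO into the COMEDI buffer with this helper and then
 * lets the core process any event flags.  my_read_fifo() and the 16-bit
 * sample size are assumptions for the sake of illustration:
 *
 *	unsigned short sample;
 *
 *	while (my_read_fifo(dev, &sample))
 *		comedi_buf_write_samples(s, &sample, 1);
 *	comedi_handle_events(dev, s);
 */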

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress.  Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
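
/*
 * Example (sketch): an analog output driver's interrupt handler typically
 * refills the hardware FIFO from the COMEDI buffer with this helper.
 * my_fifo_has_room(), my_write_fifo() and the 16-bit sample size are
 * assumptions for the sake of illustration:
 *
 *	unsigned short sample;
 *
 *	while (my_fifo_has_room(dev) &&
 *	       comedi_buf_read_samples(s, &sample, 1) > 0)
 *		my_write_fifo(dev, sample);
 *	comedi_handle_events(dev, s);
 */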