/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
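
/*
 * Example (an illustrative sketch, not part of this file): a client that only
 * wants opportunistic memcpy offload typically uses the shared channel table.
 * The dst/src addresses and len below are assumed to be already DMA-mapped
 * by the caller.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
 *							   len, DMA_CTRL_ACK);
 *		if (tx) {
 *			dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *		}
 *	}
 *	...
 *	dmaengine_put();
 *
 * Clients that need a channel of their own use dma_request_chan() /
 * dma_request_channel() and dma_release_channel() instead.
 */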

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		ida_free(&dma_ida, chan_dev->dev_id);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports that it uses the generic slave
	 * capabilities; if not, it does not support any kind of slave
	 * capability reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
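
/*
 * Example (an illustrative sketch, not part of this file): a client can use
 * dma_get_slave_caps() to check what a slave channel supports before
 * configuring it; "chan" is assumed to have been obtained via
 * dma_request_chan().
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM)))
 *		... the channel can do device-to-memory transfers ...
 */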

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
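
/*
 * Example (an illustrative sketch, not part of this file): requesting a
 * channel by mask with a filter function.  The filter below and its "match"
 * parameter are hypothetical; a real filter typically compares
 * driver-specific data such as chan->private or the channel's device.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->private == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = __dma_request_channel(&mask, my_filter, match);
 */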

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
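
/*
 * Example (an illustrative sketch, not part of this file): a slave driver
 * usually requests its channel by name at probe time and releases it on
 * remove; the "tx" name is assumed to come from DT, ACPI or a dma_slave_map
 * entry provided by the DMA controller driver.
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	...
 *	dma_release_channel(chan);
 */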

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
#endif
#endif

#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
#endif
#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMAdevice must have dev\n");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		ida_free(&dma_ida, device->dev_id);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
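
/*
 * Example (an illustrative sketch, not part of this file): a DMA controller
 * driver using the managed variant does not need to call
 * dma_async_device_unregister() in its remove path; "my_dev" and its
 * embedded dma_device are hypothetical.
 *
 *	ret = dmaenginem_async_device_register(&my_dev->dma_dev);
 *	if (ret)
 *		return ret;
 *	// unregistration happens automatically on driver detach
 */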

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
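
/*
 * Example (an illustrative sketch, not part of this file): a driver that maps
 * one source and one destination page for a copy can track both mappings in a
 * single unmap descriptor and drop them with one put; the page, offset and
 * len values are assumed to come from the caller.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len,
 *				      DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len,
 *				      DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...
 *	dmaengine_unmap_put(unmap);
 */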

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);