kernel/dma/debug.c (DMA-API debugging core) — lines matching +full:sg +full:-micro

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "DMA-API: " fmt
12 #include <linux/dma-map-ops.h>
31 #define HASH_FN_MASK (HASH_SIZE - 1)
53 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54 * @list: node on pre-allocated free_entries list
55 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
57 * @type: single, page, sg, coherent
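
A condensed, kernel-style sketch of the record this comment documents, with the field set inferred from the excerpts below; the kernel's full definition also carries the hash-bucket linkage and a saved stack trace:

struct dma_debug_entry_sketch {
	struct list_head list;           /* free list or hash-bucket chain */
	struct device    *dev;           /* device that created the mapping */
	int              type;           /* single, page, sg, coherent, resource */
	unsigned long    pfn;            /* first page frame of the mapping */
	size_t           offset;         /* offset into that page */
	u64              dev_addr;       /* DMA address handed to the device */
	u64              size;           /* mapping length in bytes */
	int              direction;      /* DMA_TO_DEVICE etc. */
	int              sg_call_ents;   /* nents passed to dma_map_sg() */
	int              sg_mapped_ents; /* nents returned by dma_map_sg() */
	int              map_err_type;   /* was dma_mapping_error() called? */
};
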
93 /* List of pre-allocated dma_debug_entry's */
98 /* Global disable flag - will be set in case of an error */
124 /* per-driver filter related state */
141 [dma_debug_sg] = "scatter-gather",
171 stack_trace_print(entry->stack_entries, entry->stack_len, 0); in dump_entry_trace()
187 if (current_driver && dev && dev->driver == current_driver) in driver_filter()
198 drv = dev->driver; in driver_filter()
206 if (drv->name && in driver_filter()
207 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { in driver_filter()
227 show_num_errors -= 1; \
233 * Every DMA-API request is saved into a struct dma_debug_entry. To have quick access to these structs they are stored in a hash.
240 * We use bits 20-27 here as the index into the hash in hash_fn()
242 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; in hash_fn()
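
A minimal, self-contained illustration of this bucket hash. The shift and table size below are assumptions read off the "bits 20-27" comment; the authoritative values are the HASH_FN_SHIFT/HASH_FN_MASK constants above:

#include <stdint.h>
#include <stdio.h>

#define HASH_FN_SHIFT 20
#define HASH_SIZE     (1 << 8)          /* bits 20-27 -> 256 buckets */
#define HASH_FN_MASK  (HASH_SIZE - 1)

static unsigned int hash_fn(uint64_t dev_addr)
{
	return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

int main(void)
{
	/* Two addresses in the same 1 MiB window land in the same bucket. */
	printf("%u %u\n", hash_fn(0x12345678ULL), hash_fn(0x123fffffULL));
	return 0;
}
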
265 __releases(&bucket->lock) in put_hash_bucket()
267 spin_unlock_irqrestore(&bucket->lock, flags); in put_hash_bucket()
272 return ((a->dev_addr == b->dev_addr) && in exact_match()
273 (a->dev == b->dev)) ? true : false; in exact_match()
279 if (a->dev != b->dev) in containing_match()
282 if ((b->dev_addr <= a->dev_addr) && in containing_match()
283 ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) in containing_match()
297 int matches = 0, match_lvl, last_lvl = -1; in __hash_bucket_find()
299 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
306 * same device addresses being put into the dma-debug in __hash_bucket_find()
309 * best-fit algorithm here which returns the entry from in __hash_bucket_find()
311 * instead of the first-fit. in __hash_bucket_find()
315 entry->size == ref->size ? ++match_lvl : 0; in __hash_bucket_find()
316 entry->type == ref->type ? ++match_lvl : 0; in __hash_bucket_find()
317 entry->direction == ref->direction ? ++match_lvl : 0; in __hash_bucket_find()
318 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; in __hash_bucket_find()
321 /* perfect-fit - return the result */ in __hash_bucket_find()
334 * If we have multiple matches but no perfect-fit, just return NULL. in __hash_bucket_find()
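
A sketch of the best-fit scoring described here: each field that matches the reference adds one level, a perfect fit (all four) wins immediately, and an ambiguous partial match yields nothing. Field names mirror struct dma_debug_entry; this is illustrative, not the kernel's exact code:

struct ref_sketch {
	unsigned long long dev_addr, size;
	int type, direction, sg_call_ents;
};

static int match_lvl(const struct ref_sketch *entry, const struct ref_sketch *ref)
{
	int lvl = 0;

	if (entry->size == ref->size)
		lvl++;
	if (entry->type == ref->type)
		lvl++;
	if (entry->direction == ref->direction)
		lvl++;
	if (entry->sg_call_ents == ref->sg_call_ents)
		lvl++;
	return lvl;	/* 4 == perfect fit, returned right away */
}
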
353 unsigned int max_range = dma_get_max_seg_size(ref->dev); in bucket_find_contain()
368 index.dev_addr -= (1 << HASH_FN_SHIFT); in bucket_find_contain()
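
The decrement above is the heart of the containing-match walk: a mapping can start in an earlier bucket than the address under test, so the search steps back one bucket span at a time, bounded by the device's maximum segment size. A schematic body of bucket_find_contain(), assuming get_hash_bucket() is the locking counterpart of put_hash_bucket() seen earlier:

struct dma_debug_entry *entry, index = *ref;
unsigned int range = 0;

while (range <= max_range) {
	entry = __hash_bucket_find(*bucket, &index, containing_match);
	if (entry)
		return entry;

	/* nothing in this bucket: drop it and step one span backwards */
	put_hash_bucket(*bucket, *flags);
	range          += (1 << HASH_FN_SHIFT);
	index.dev_addr -= (1 << HASH_FN_SHIFT);
	*bucket = get_hash_bucket(&index, flags);
}

return NULL;
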
381 list_add_tail(&entry->list, &bucket->list); in hash_bucket_add()
389 list_del(&entry->list); in hash_bucket_del()
394 if (entry->type == dma_debug_resource) in phys_addr()
395 return __pfn_to_phys(entry->pfn) + entry->offset; in phys_addr()
397 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; in phys_addr()
412 spin_lock_irqsave(&bucket->lock, flags); in debug_dma_dump_mappings()
414 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_dump_mappings()
415 if (!dev || dev == entry->dev) { in debug_dma_dump_mappings()
416 dev_info(entry->dev, in debug_dma_dump_mappings()
418 type2name[entry->type], idx, in debug_dma_dump_mappings()
419 phys_addr(entry), entry->pfn, in debug_dma_dump_mappings()
420 entry->dev_addr, entry->size, in debug_dma_dump_mappings()
421 dir2name[entry->direction], in debug_dma_dump_mappings()
422 maperr2str[entry->map_err_type]); in debug_dma_dump_mappings()
426 spin_unlock_irqrestore(&bucket->lock, flags); in debug_dma_dump_mappings()
436 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
444 * dma-debug entries in that we need a free dma_debug_entry before
453 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
454 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
459 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + in to_cacheline_number()
460 (entry->offset >> L1_CACHE_SHIFT); in to_cacheline_number()
467 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) in active_cacheline_read_overlap()
480 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) in active_cacheline_set_overlap()
496 * leaking dma-mappings. in active_cacheline_inc_overlap()
507 return active_cacheline_set_overlap(cln, --overlap); in active_cacheline_dec_overlap()
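
A self-contained sketch of the cacheline bookkeeping in this block: every L1 line gets a global index derived from the page frame number, and the per-line overlap count is a small saturating counter (the kernel packs it into the radix tree's RADIX_TREE_MAX_TAGS tag bits, hence the (1 << RADIX_TREE_MAX_TAGS) - 1 ceiling). Page and cacheline sizes below are assumptions:

#include <stdint.h>

#define PAGE_SHIFT     12                /* assumed 4 KiB pages */
#define L1_CACHE_SHIFT 6                 /* assumed 64-byte cachelines */
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define MAX_OVERLAP    ((1 << 3) - 1)    /* assuming 3 tag bits */

static uint64_t to_cacheline_number(uint64_t pfn, uint64_t offset)
{
	return (pfn << CACHELINE_PER_PAGE_SHIFT) + (offset >> L1_CACHE_SHIFT);
}

static int inc_overlap(int overlap)
{
	/* saturate instead of wrapping; the overflow is reported once */
	return overlap < MAX_OVERLAP ? overlap + 1 : MAX_OVERLAP;
}
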
520 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_insert()
525 if (rc == -EEXIST) in active_cacheline_insert()
538 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_remove()
544 * active_cacheline_dec_overlap() returns -1 in that case in active_cacheline_remove()
566 if (rc == -ENOMEM) { in add_dma_entry()
567 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); in add_dma_entry()
571 /* TODO: report -EEXIST errors here as overlapping mappings are not supported by the DMA API */ in add_dma_entry()
583 return -ENOMEM; in dma_debug_create_entries()
599 list_del(&entry->list); in __dma_entry_alloc()
602 num_free_entries -= 1; in __dma_entry_alloc()
636 pr_err("debugging out of memory - disabling\n"); in dma_entry_alloc()
647 entry->stack_len = stack_trace_save(entry->stack_entries, in dma_entry_alloc()
648 ARRAY_SIZE(entry->stack_entries), in dma_entry_alloc()
661 * add to beginning of the list - this way the entries are more likely cache hot. in dma_entry_free()
665 list_add(&entry->list, &free_entries); in dma_entry_free()
671 * DMA-API debugging init code
714 len = min(count, (size_t)(NAME_MAX_LEN - 1)); in filter_write()
716 return -EFAULT; in filter_write()
725 * - only use the first token we got in filter_write()
726 * - token delimiter is everything looking like a space in filter_write()
737 pr_info("switching off dma-debug driver filter\n"); in filter_write()
747 for (i = 0; i < NAME_MAX_LEN - 1; ++i) { in filter_write()
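
A userspace sketch of the parsing rules listed above: take at most NAME_MAX_LEN - 1 bytes, keep only the first token, and let anything space-like delimit it; an empty token switches the filter off. (In the kernel this handler backs the driver_filter file in the dma-api debugfs directory created below.) The NAME_MAX_LEN value is an assumption:

#include <ctype.h>
#include <stddef.h>

#define NAME_MAX_LEN 64	/* assumed; the real constant lives in debug.c */

static size_t parse_driver_filter(const char *buf, size_t count, char *name)
{
	size_t i, len = count < NAME_MAX_LEN - 1 ? count : NAME_MAX_LEN - 1;

	for (i = 0; i < len && buf[i] && !isspace((unsigned char)buf[i]); i++)
		name[i] = buf[i];
	name[i] = '\0';
	return i;	/* 0 means: switch the driver filter off */
}
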
779 spin_lock_irqsave(&bucket->lock, flags); in dump_show()
780 list_for_each_entry(entry, &bucket->list, list) { in dump_show()
783 dev_name(entry->dev), in dump_show()
784 dev_driver_string(entry->dev), in dump_show()
785 type2name[entry->type], idx, in dump_show()
786 phys_addr(entry), entry->pfn, in dump_show()
787 entry->dev_addr, entry->size, in dump_show()
788 dir2name[entry->direction], in dump_show()
789 maperr2str[entry->map_err_type]); in dump_show()
791 spin_unlock_irqrestore(&bucket->lock, flags); in dump_show()
799 struct dentry *dentry = debugfs_create_dir("dma-api", NULL); in dma_debug_fs_init()
821 if (entry->dev == dev) { in device_dma_allocations()
852 count, entry->dev_addr, entry->size, in dma_debug_device_change()
853 dir2name[entry->direction], type2name[entry->type]); in dma_debug_device_change()
875 nb->notifier_call = dma_debug_device_change; in dma_debug_add_bus()
906 pr_err("debugging out of memory error - disabled\n"); in dma_debug_init()
923 return -EINVAL; in dma_debug_cmdline()
936 return -EINVAL; in dma_debug_entries_cmdline()
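
These two early-param handlers back the documented boot options: dma_debug=off disables the checks at boot, and dma_debug_entries=<n> raises the number of preallocated entries, e.g.:

	dma_debug_entries=65536
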
958 if (dma_mapping_error(ref->dev, ref->dev_addr)) { in check_unmap()
959 err_printk(ref->dev, NULL, in check_unmap()
963 err_printk(ref->dev, NULL, in check_unmap()
967 ref->dev_addr, ref->size); in check_unmap()
972 if (ref->size != entry->size) { in check_unmap()
973 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
977 ref->dev_addr, entry->size, ref->size); in check_unmap()
980 if (ref->type != entry->type) { in check_unmap()
981 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
985 ref->dev_addr, ref->size, in check_unmap()
986 type2name[entry->type], type2name[ref->type]); in check_unmap()
987 } else if ((entry->type == dma_debug_coherent) && in check_unmap()
989 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
994 ref->dev_addr, ref->size, in check_unmap()
999 if (ref->sg_call_ents && ref->type == dma_debug_sg && in check_unmap()
1000 ref->sg_call_ents != entry->sg_call_ents) { in check_unmap()
1001 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1002 "DMA sg list with different entry count " in check_unmap()
1004 entry->sg_call_ents, ref->sg_call_ents); in check_unmap()
1008 * This may be no bug in reality - but most implementations of the DMA API don't handle this properly, so check for it here. in check_unmap()
1011 if (ref->direction != entry->direction) { in check_unmap()
1012 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1016 ref->dev_addr, ref->size, in check_unmap()
1017 dir2name[entry->direction], in check_unmap()
1018 dir2name[ref->direction]); in check_unmap()
1024 * If not, print this warning message. See Documentation/core-api/dma-api.rst. in check_unmap()
1026 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in check_unmap()
1027 err_printk(ref->dev, entry, in check_unmap()
1031 ref->dev_addr, ref->size, in check_unmap()
1032 type2name[entry->type]); in check_unmap()
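
A hedged driver-side sketch of the contract check_unmap() enforces: unmap with the same address, size, type and direction used at map time, and feed every mapping through dma_mapping_error() before using it:

dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, handle))
	return -ENOMEM;			/* failed mappings are never unmapped */

/* ... point the hardware at "handle" and run the transfer ... */

dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);	/* same len, same dir */
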
1048 /* Stack is direct-mapped. */ in check_for_stack()
1058 for (i = 0; i < stack_vm_area->nr_pages; i++) { in check_for_stack()
1059 if (page != stack_vm_area->pages[i]) in check_for_stack()
1062 addr = (u8 *)current->stack + i * PAGE_SIZE + offset; in check_for_stack()
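
A hedged example of the bug this check catches, DMA-mapping an on-stack buffer; with vmalloc'ed stacks such a buffer may not even be physically contiguous:

char cmd[16];					/* lives on the kernel stack */
dma_addr_t bad = dma_map_single(dev, cmd, sizeof(cmd),
				DMA_TO_DEVICE);	/* dma-debug flags this */
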
1102 (unsigned long long)ref->dev_addr, ref->size); in check_sync()
1106 if (ref->size > entry->size) { in check_sync()
1112 entry->dev_addr, entry->size, in check_sync()
1113 ref->size); in check_sync()
1116 if (entry->direction == DMA_BIDIRECTIONAL) in check_sync()
1119 if (ref->direction != entry->direction) { in check_sync()
1124 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1125 dir2name[entry->direction], in check_sync()
1126 dir2name[ref->direction]); in check_sync()
1129 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && in check_sync()
1130 !(ref->direction == DMA_TO_DEVICE)) in check_sync()
1132 "device read-only DMA memory for cpu " in check_sync()
1135 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1136 dir2name[entry->direction], in check_sync()
1137 dir2name[ref->direction]); in check_sync()
1139 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && in check_sync()
1140 !(ref->direction == DMA_FROM_DEVICE)) in check_sync()
1142 "device write-only DMA memory to device " in check_sync()
1145 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1146 dir2name[entry->direction], in check_sync()
1147 dir2name[ref->direction]); in check_sync()
1149 if (ref->sg_call_ents && ref->type == dma_debug_sg && in check_sync()
1150 ref->sg_call_ents != entry->sg_call_ents) { in check_sync()
1151 err_printk(ref->dev, entry, "device driver syncs " in check_sync()
1152 "DMA sg list with different entry count " in check_sync()
1154 entry->sg_call_ents, ref->sg_call_ents); in check_sync()
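
A hedged example of what the direction checks above flag: a buffer mapped device read-only (DMA_TO_DEVICE) being synced back toward the CPU:

dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);	/* warned: wrong
								 * direction, and
								 * read-only memory
								 * synced for cpu */
dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);	/* fine */
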
1161 static void check_sg_segment(struct device *dev, struct scatterlist *sg) in check_sg_segment()
1171 if (sg->length > max_seg) in check_sg_segment()
1172 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", in check_sg_segment()
1173 sg->length, max_seg); in check_sg_segment()
1179 start = sg_dma_address(sg); in check_sg_segment()
1180 end = start + sg_dma_len(sg) - 1; in check_sg_segment()
1182 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", in check_sg_segment()
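
Both segment checks condensed into one self-contained predicate: a length cap from dma_get_max_seg_size() and a power-of-two boundary mask from dma_get_seg_boundary(). A segment crosses the boundary exactly when its first and last byte differ in an address bit above the mask:

#include <stdbool.h>
#include <stdint.h>

static bool sg_segment_ok(uint64_t start, uint32_t len,
			  uint32_t max_seg, uint64_t boundary_mask)
{
	uint64_t end = start + len - 1;

	if (len > max_seg)
		return false;		/* longer than the device supports */
	return ((start ^ end) & ~boundary_mask) == 0;
}
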
1218 entry->dev = dev; in debug_dma_map_page()
1219 entry->type = dma_debug_single; in debug_dma_map_page()
1220 entry->pfn = page_to_pfn(page); in debug_dma_map_page()
1221 entry->offset = offset; in debug_dma_map_page()
1222 entry->dev_addr = dma_addr; in debug_dma_map_page()
1223 entry->size = size; in debug_dma_map_page()
1224 entry->direction = direction; in debug_dma_map_page()
1225 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_page()
1252 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_mapping_error()
1259 * same device addresses being put into the dma-debug in debug_dma_mapping_error()
1262 * best-fit algorithm here which updates the first entry in debug_dma_mapping_error()
1266 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in debug_dma_mapping_error()
1267 entry->map_err_type = MAP_ERR_CHECKED; in debug_dma_mapping_error()
1292 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, in debug_dma_map_sg()
1302 for_each_sg(sg, s, mapped_ents, i) { in debug_dma_map_sg()
1307 entry->type = dma_debug_sg; in debug_dma_map_sg()
1308 entry->dev = dev; in debug_dma_map_sg()
1309 entry->pfn = page_to_pfn(sg_page(s)); in debug_dma_map_sg()
1310 entry->offset = s->offset; in debug_dma_map_sg()
1311 entry->size = sg_dma_len(s); in debug_dma_map_sg()
1312 entry->dev_addr = sg_dma_address(s); in debug_dma_map_sg()
1313 entry->direction = direction; in debug_dma_map_sg()
1314 entry->sg_call_ents = nents; in debug_dma_map_sg()
1315 entry->sg_mapped_ents = mapped_ents; in debug_dma_map_sg()
1317 check_for_stack(dev, sg_page(s), s->offset); in debug_dma_map_sg()
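
A hedged driver-side sketch of the bookkeeping recorded above: the hardware loop must use the count dma_map_sg() returned (adjacent entries may be merged), while dma_unmap_sg() takes the original nents — precisely the sg_call_ents vs. sg_mapped_ents distinction. queue_descriptor() is a hypothetical helper:

struct scatterlist *s;
int i, mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

if (!mapped)
	return -ENOMEM;
for_each_sg(sgl, s, mapped, i)
	queue_descriptor(sg_dma_address(s), sg_dma_len(s));	/* hypothetical */
dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);			/* original nents */
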
1342 mapped_ents = entry->sg_mapped_ents; in get_nr_mapped_entries()
1363 .offset = s->offset, in debug_dma_unmap_sg()
1399 entry->type = dma_debug_coherent; in debug_dma_alloc_coherent()
1400 entry->dev = dev; in debug_dma_alloc_coherent()
1401 entry->offset = offset_in_page(virt); in debug_dma_alloc_coherent()
1402 entry->size = size; in debug_dma_alloc_coherent()
1403 entry->dev_addr = dma_addr; in debug_dma_alloc_coherent()
1404 entry->direction = DMA_BIDIRECTIONAL; in debug_dma_alloc_coherent()
1407 entry->pfn = vmalloc_to_pfn(virt); in debug_dma_alloc_coherent()
1409 entry->pfn = page_to_pfn(virt_to_page(virt)); in debug_dma_alloc_coherent()
1453 entry->type = dma_debug_resource; in debug_dma_map_resource()
1454 entry->dev = dev; in debug_dma_map_resource()
1455 entry->pfn = PHYS_PFN(addr); in debug_dma_map_resource()
1456 entry->offset = offset_in_page(addr); in debug_dma_map_resource()
1457 entry->size = size; in debug_dma_map_resource()
1458 entry->dev_addr = dma_addr; in debug_dma_map_resource()
1459 entry->direction = direction; in debug_dma_map_resource()
1460 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_resource()
1519 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, in debug_dma_sync_sg_for_cpu()
1528 for_each_sg(sg, s, nelems, i) { in debug_dma_sync_sg_for_cpu()
1534 .offset = s->offset, in debug_dma_sync_sg_for_cpu()
1551 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, in debug_dma_sync_sg_for_device()
1560 for_each_sg(sg, s, nelems, i) { in debug_dma_sync_sg_for_device()
1566 .offset = s->offset, in debug_dma_sync_sg_for_device()
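
A hedged sketch of the pairing these two hooks verify: sync toward the CPU before reading device-written data, back toward the device before the buffer is reused, both with the direction the list was mapped with:

dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
/* ... CPU parses the freshly DMA'd data ... */
dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
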
1586 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { in dma_debug_driver_setup()