Lines matching the full identifier "entry" (kernel/dma/debug.c)
166 static inline void dump_entry_trace(struct dma_debug_entry *entry) in dump_entry_trace() argument
169 if (entry) { in dump_entry_trace()
171 stack_trace_print(entry->stack_entries, entry->stack_len, 0); in dump_entry_trace()
217 #define err_printk(dev, entry, format, arg...) do { \ argument
224 dump_entry_trace(entry); \
236 static int hash_fn(struct dma_debug_entry *entry) in hash_fn() argument
242 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; in hash_fn()
248 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, in get_hash_bucket() argument
252 int idx = hash_fn(entry); in get_hash_bucket()
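For orientation: hash_fn() drops the low HASH_FN_SHIFT bits of the device address and masks the result, so nearby DMA addresses land in the same bucket and a whole mapping spans few buckets. A standalone sketch of the same arithmetic (constants mirror this file's usual defines, HASH_SIZE 16384 and HASH_FN_SHIFT 13; the demo itself is mine):

    #include <stdint.h>
    #include <stdio.h>

    #define HASH_SIZE     16384ULL        /* bucket count (kernel default)   */
    #define HASH_FN_SHIFT 13              /* ignore the low 8 KiB of address */
    #define HASH_FN_MASK  (HASH_SIZE - 1)

    /* Same arithmetic as hash_fn() above, applied to a bare address. */
    static unsigned int bucket_index(uint64_t dev_addr)
    {
            return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
    }

    int main(void)
    {
            /* 0x0000 and 0x1fff share a bucket; 0x2000 starts the next. */
            printf("%u %u %u\n", bucket_index(0x0000),
                   bucket_index(0x1fff), bucket_index(0x2000));
            return 0;
    }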
290  * Search for a given entry in the hash bucket list
296 struct dma_debug_entry *entry, *ret = NULL; in __hash_bucket_find() local
299 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
300 if (!match(ref, entry)) in __hash_bucket_find()
309 * best-fit algorithm here which returns the entry from in __hash_bucket_find()
315 entry->size == ref->size ? ++match_lvl : 0; in __hash_bucket_find()
316 entry->type == ref->type ? ++match_lvl : 0; in __hash_bucket_find()
317 entry->direction == ref->direction ? ++match_lvl : 0; in __hash_bucket_find()
318 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; in __hash_bucket_find()
322 return entry; in __hash_bucket_find()
325  * We found an entry that fits better than the in __hash_bucket_find()
329 ret = entry; in __hash_bucket_find()
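The scoring in this loop is deliberately simple: each of the four compared fields that agrees with the reference adds one to match_lvl, a perfect score of four returns immediately, and otherwise the best-scoring entry seen so far is kept. The same logic in condensed standalone form (the stand-in struct is hypothetical; the fields are the ones compared above):

    #include <stddef.h>

    /* Minimal stand-in for the fields __hash_bucket_find() compares. */
    struct entry_view {
            size_t size;
            int    type;
            int    direction;
            int    sg_call_ents;
    };

    /* Score a candidate against the reference; 4 is an exact fit. */
    static int match_score(const struct entry_view *ref,
                           const struct entry_view *e)
    {
            return (e->size         == ref->size) +
                   (e->type         == ref->type) +
                   (e->direction    == ref->direction) +
                   (e->sg_call_ents == ref->sg_call_ents);
    }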
354 struct dma_debug_entry *entry, index = *ref; in bucket_find_contain() local
358 entry = __hash_bucket_find(*bucket, ref, containing_match); in bucket_find_contain()
360 if (entry) in bucket_find_contain()
361 return entry; in bucket_find_contain()
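bucket_find_contain() handles references that point into the middle of a mapping: such an entry was hashed under the mapping's lower start address, so on a miss the search steps the lookup address back by one hash window and retries the previous bucket. Roughly (a sketch of the loop; helper signatures are approximate):

    /* Walk back one hash window (1 << HASH_FN_SHIFT bytes) per miss,
     * because a mapping containing ref->dev_addr was inserted under
     * its lower start address and thus hashed to an earlier bucket.
     */
    for (i = 0; i < limit; i++) {
            entry = __hash_bucket_find(*bucket, ref, containing_match);
            if (entry)
                    return entry;

            /* drop the current bucket's lock, take the previous one */
            put_hash_bucket(*bucket, *flags);
            index.dev_addr -= (1 << HASH_FN_SHIFT);
            *bucket = get_hash_bucket(&index, flags);
    }
    return NULL;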
376 * Add an entry to a hash bucket
379 struct dma_debug_entry *entry) in hash_bucket_add() argument
381 list_add_tail(&entry->list, &bucket->list); in hash_bucket_add()
385 * Remove entry from a hash bucket list
387 static void hash_bucket_del(struct dma_debug_entry *entry) in hash_bucket_del() argument
389 list_del(&entry->list); in hash_bucket_del()
392 static unsigned long long phys_addr(struct dma_debug_entry *entry) in phys_addr() argument
394 if (entry->type == dma_debug_resource) in phys_addr()
395 return __pfn_to_phys(entry->pfn) + entry->offset; in phys_addr()
397 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; in phys_addr()
409 struct dma_debug_entry *entry; in debug_dma_dump_mappings() local
414 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_dump_mappings()
415 if (!dev || dev == entry->dev) { in debug_dma_dump_mappings()
416 dev_info(entry->dev, in debug_dma_dump_mappings()
418 type2name[entry->type], idx, in debug_dma_dump_mappings()
419 phys_addr(entry), entry->pfn, in debug_dma_dump_mappings()
420 entry->dev_addr, entry->size, in debug_dma_dump_mappings()
421 dir2name[entry->direction], in debug_dma_dump_mappings()
422 maperr2str[entry->map_err_type]); in debug_dma_dump_mappings()
436 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
437 * the entry already exists at insertion time add a tag as a reference
447 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
457 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) in to_cacheline_number() argument
459 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + in to_cacheline_number()
460 (entry->offset >> L1_CACHE_SHIFT); in to_cacheline_number()
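to_cacheline_number() flattens (pfn, page offset) into a global cacheline index: the pfn shifted by the cachelines-per-page, plus the cacheline within the page. With 4 KiB pages and 64-byte lines (both assumed here), CACHELINE_PER_PAGE_SHIFT is 12 - 6 = 6. A worked standalone example:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT               12  /* 4 KiB pages (assumed)        */
    #define L1_CACHE_SHIFT           6   /* 64-byte cachelines (assumed) */
    #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

    static uint64_t cacheline_number(uint64_t pfn, uint64_t offset)
    {
            return (pfn << CACHELINE_PER_PAGE_SHIFT) +
                   (offset >> L1_CACHE_SHIFT);
    }

    int main(void)
    {
            /* pfn 2, offset 0x80: 2 * 64 + 2 = cacheline 130. */
            printf("%llu\n", (unsigned long long)cacheline_number(2, 0x80));
            return 0;
    }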
510 static int active_cacheline_insert(struct dma_debug_entry *entry) in active_cacheline_insert() argument
512 phys_addr_t cln = to_cacheline_number(entry); in active_cacheline_insert()
520 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_insert()
524 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); in active_cacheline_insert()
532 static void active_cacheline_remove(struct dma_debug_entry *entry) in active_cacheline_remove() argument
534 phys_addr_t cln = to_cacheline_number(entry); in active_cacheline_remove()
538 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_remove()
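active_cacheline_insert()/active_cacheline_remove() keep one radix-tree slot per cacheline with an active mapping; DMA_TO_DEVICE mappings are skipped because a device that only reads cannot hand the CPU stale data. When a second mapping hits an occupied slot, radix_tree_insert() returns -EEXIST and an overlap is recorded rather than treated as failure. Condensed insert-side sketch (locking and helpers as in the fragments above; the condensation is mine):

    /* Condensed from active_cacheline_insert(): one slot per cacheline. */
    if (entry->direction == DMA_TO_DEVICE)
            return 0;                          /* read-only: no hazard */

    spin_lock_irqsave(&radix_lock, flags);
    rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
    if (rc == -EEXIST)
            active_cacheline_inc_overlap(cln); /* track, don't fail */
    spin_unlock_irqrestore(&radix_lock, flags);

    return rc;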
552 * Wrapper function for adding an entry to the hash.
555 static void add_dma_entry(struct dma_debug_entry *entry) in add_dma_entry() argument
561 bucket = get_hash_bucket(entry, &flags); in add_dma_entry()
562 hash_bucket_add(bucket, entry); in add_dma_entry()
565 rc = active_cacheline_insert(entry); in add_dma_entry()
578 struct dma_debug_entry *entry; in dma_debug_create_entries() local
581 entry = (void *)get_zeroed_page(gfp); in dma_debug_create_entries()
582 if (!entry) in dma_debug_create_entries()
586 list_add_tail(&entry[i].list, &free_entries); in dma_debug_create_entries()
596 struct dma_debug_entry *entry; in __dma_entry_alloc() local
598 entry = list_entry(free_entries.next, struct dma_debug_entry, list); in __dma_entry_alloc()
599 list_del(&entry->list); in __dma_entry_alloc()
600 memset(entry, 0, sizeof(*entry)); in __dma_entry_alloc()
606 return entry; in __dma_entry_alloc()
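__dma_entry_alloc() is a plain free-list pop: unlink the first node from free_entries and zero it. Entries are preallocated a page at a time by dma_debug_create_entries(), so the tracking path never depends on the allocator at map time. The same pattern in self-contained form (a hypothetical stand-in, singly linked for brevity where the kernel uses list_head):

    #include <string.h>

    struct entry {
            struct entry *next;
            /* ... payload fields ... */
    };

    static struct entry *free_list;   /* filled by batch preallocation */

    static struct entry *entry_alloc(void)
    {
            struct entry *e = free_list;

            if (!e)
                    return NULL;              /* caller handles exhaustion */
            free_list = e->next;
            memset(e, 0, sizeof(*e));         /* as in __dma_entry_alloc() */
            return e;
    }

    static void entry_free(struct entry *e)
    {
            e->next = free_list;              /* push back for reuse */
            free_list = e;
    }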
628 struct dma_debug_entry *entry; in dma_entry_alloc() local
642 entry = __dma_entry_alloc(); in dma_entry_alloc()
647 entry->stack_len = stack_trace_save(entry->stack_entries, in dma_entry_alloc()
648 ARRAY_SIZE(entry->stack_entries), in dma_entry_alloc()
651 return entry; in dma_entry_alloc()
654 static void dma_entry_free(struct dma_debug_entry *entry) in dma_entry_free() argument
658 active_cacheline_remove(entry); in dma_entry_free()
665 list_add(&entry->list, &free_entries); in dma_entry_free()
776 struct dma_debug_entry *entry; in dump_show() local
780 list_for_each_entry(entry, &bucket->list, list) { in dump_show()
783 dev_name(entry->dev), in dump_show()
784 dev_driver_string(entry->dev), in dump_show()
785 type2name[entry->type], idx, in dump_show()
786 phys_addr(entry), entry->pfn, in dump_show()
787 entry->dev_addr, entry->size, in dump_show()
788 dir2name[entry->direction], in dump_show()
789 maperr2str[entry->map_err_type]); in dump_show()
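dump_show() backs a debugfs file: with CONFIG_DMA_API_DEBUG enabled, the same per-mapping lines printed by debug_dma_dump_mappings() can be read at runtime, normally from /sys/kernel/debug/dma-api/dump.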
814 struct dma_debug_entry *entry; in device_dma_allocations() local
820 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { in device_dma_allocations()
821 if (entry->dev == dev) { in device_dma_allocations()
823 *out_entry = entry; in device_dma_allocations()
835 struct dma_debug_entry *entry; in dma_debug_device_change() local
843 count = device_dma_allocations(dev, &entry); in dma_debug_device_change()
846 err_printk(dev, entry, "device driver has pending " in dma_debug_device_change()
852 count, entry->dev_addr, entry->size, in dma_debug_device_change()
853 dir2name[entry->direction], type2name[entry->type]); in dma_debug_device_change()
947 struct dma_debug_entry *entry; in check_unmap() local
952 entry = bucket_find_exact(bucket, ref); in check_unmap()
954 if (!entry) { in check_unmap()
972 if (ref->size != entry->size) { in check_unmap()
973 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
977 ref->dev_addr, entry->size, ref->size); in check_unmap()
980 if (ref->type != entry->type) { in check_unmap()
981 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
986 type2name[entry->type], type2name[ref->type]); in check_unmap()
987 } else if ((entry->type == dma_debug_coherent) && in check_unmap()
988 (phys_addr(ref) != phys_addr(entry))) { in check_unmap()
989 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
995 phys_addr(entry), in check_unmap()
1000 ref->sg_call_ents != entry->sg_call_ents) { in check_unmap()
1001 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1002 "DMA sg list with different entry count " in check_unmap()
1004 entry->sg_call_ents, ref->sg_call_ents); in check_unmap()
1011 if (ref->direction != entry->direction) { in check_unmap()
1012 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1017 dir2name[entry->direction], in check_unmap()
1026 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in check_unmap()
1027 err_printk(ref->dev, entry, in check_unmap()
1032 type2name[entry->type]); in check_unmap()
1035 hash_bucket_del(entry); in check_unmap()
1036 dma_entry_free(entry); in check_unmap()
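check_unmap() is where the classic unmap bugs surface: wrong size, wrong type, wrong address, wrong direction, wrong sg entry count, or a mapping error that was never checked. A hypothetical driver fragment that trips the size check (names are illustrative; the dma_map_single()/dma_unmap_single() calls are the standard API):

    #include <linux/dma-mapping.h>

    /* Hypothetical fragment: maps 4096 bytes but unmaps 2048. With
     * CONFIG_DMA_API_DEBUG, check_unmap() reports that the driver
     * "frees DMA memory with different size".
     */
    static void buggy_unmap(struct device *dev, void *buf)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return;

            dma_unmap_single(dev, handle, 2048, DMA_TO_DEVICE); /* BUG */
    }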
1090 struct dma_debug_entry *entry; in check_sync() local
1096 entry = bucket_find_contain(&bucket, ref, &flags); in check_sync()
1098 if (!entry) { in check_sync()
1106 if (ref->size > entry->size) { in check_sync()
1107 err_printk(dev, entry, "device driver syncs" in check_sync()
1112 entry->dev_addr, entry->size, in check_sync()
1116 if (entry->direction == DMA_BIDIRECTIONAL) in check_sync()
1119 if (ref->direction != entry->direction) { in check_sync()
1120 err_printk(dev, entry, "device driver syncs " in check_sync()
1124 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1125 dir2name[entry->direction], in check_sync()
1129 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && in check_sync()
1131 err_printk(dev, entry, "device driver syncs " in check_sync()
1135 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1136 dir2name[entry->direction], in check_sync()
1139 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && in check_sync()
1141 err_printk(dev, entry, "device driver syncs " in check_sync()
1145 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1146 dir2name[entry->direction], in check_sync()
1150 ref->sg_call_ents != entry->sg_call_ents) { in check_sync()
1151 err_printk(ref->dev, entry, "device driver syncs " in check_sync()
1152 "DMA sg list with different entry count " in check_sync()
1154 entry->sg_call_ents, ref->sg_call_ents); in check_sync()
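check_sync() applies the same idea to dma_sync_*() calls: the sync must fall inside a live mapping and its direction must be compatible (DMA_BIDIRECTIONAL mappings accept either). A hypothetical fragment that trips the direction check:

    #include <linux/dma-mapping.h>

    /* Hypothetical fragment: the mapping is DMA_TO_DEVICE but the sync
     * claims DMA_FROM_DEVICE; check_sync() reports a direction mismatch.
     */
    static void buggy_sync(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return;

            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE); /* BUG */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    }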
1206 struct dma_debug_entry *entry; in debug_dma_map_page() local
1214 entry = dma_entry_alloc(); in debug_dma_map_page()
1215 if (!entry) in debug_dma_map_page()
1218 entry->dev = dev; in debug_dma_map_page()
1219 entry->type = dma_debug_single; in debug_dma_map_page()
1220 entry->pfn = page_to_pfn(page); in debug_dma_map_page()
1221 entry->offset = offset; in debug_dma_map_page()
1222 entry->dev_addr = dma_addr; in debug_dma_map_page()
1223 entry->size = size; in debug_dma_map_page()
1224 entry->direction = direction; in debug_dma_map_page()
1225 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_page()
1235 add_dma_entry(entry); in debug_dma_map_page()
1241 struct dma_debug_entry *entry; in debug_dma_mapping_error() local
1252 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_mapping_error()
1253 if (!exact_match(&ref, entry)) in debug_dma_mapping_error()
1262 * best-fit algorithm here which updates the first entry in debug_dma_mapping_error()
1266 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in debug_dma_mapping_error()
1267 entry->map_err_type = MAP_ERR_CHECKED; in debug_dma_mapping_error()
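debug_dma_mapping_error() is the consumer of the MAP_ERR_NOT_CHECKED state set in debug_dma_map_page(): calling dma_mapping_error() flips the first matching unchecked entry to MAP_ERR_CHECKED, and check_unmap() warns about any entry freed while still unchecked. The pattern this enforces (hypothetical helper):

    #include <linux/dma-mapping.h>

    /* Hypothetical helper: every mapping attempt must be followed by
     * dma_mapping_error(), which is what moves map_err_type from
     * MAP_ERR_NOT_CHECKED to MAP_ERR_CHECKED.
     */
    static int checked_map(struct device *dev, void *buf, size_t len,
                           dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            return 0;
    }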
1295 struct dma_debug_entry *entry; in debug_dma_map_sg() local
1303 entry = dma_entry_alloc(); in debug_dma_map_sg()
1304 if (!entry) in debug_dma_map_sg()
1307 entry->type = dma_debug_sg; in debug_dma_map_sg()
1308 entry->dev = dev; in debug_dma_map_sg()
1309 entry->pfn = page_to_pfn(sg_page(s)); in debug_dma_map_sg()
1310 entry->offset = s->offset; in debug_dma_map_sg()
1311 entry->size = sg_dma_len(s); in debug_dma_map_sg()
1312 entry->dev_addr = sg_dma_address(s); in debug_dma_map_sg()
1313 entry->direction = direction; in debug_dma_map_sg()
1314 entry->sg_call_ents = nents; in debug_dma_map_sg()
1315 entry->sg_mapped_ents = mapped_ents; in debug_dma_map_sg()
1325 add_dma_entry(entry); in debug_dma_map_sg()
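debug_dma_map_sg() records both sg_call_ents (the nents the driver passed in) and sg_mapped_ents (what the mapping returned), because dma_unmap_sg() must be called with the original nents even when mapping coalesced the list into fewer DMA segments; the sg_call_ents comparisons in check_unmap()/check_sync() enforce exactly that. Hypothetical usage sketch:

    #include <linux/dma-mapping.h>

    /* Hypothetical fragment: unmap with the original nents, even though
     * dma_map_sg() may return fewer (coalesced) DMA segments.
     */
    static int do_sg_dma(struct device *dev, struct scatterlist *sgl,
                         int nents)
    {
            int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

            if (mapped == 0)
                    return -ENOMEM;
            /* ... program the device with 'mapped' segments ... */
            dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE); /* nents, not mapped */
            return 0;
    }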
1332 struct dma_debug_entry *entry; in get_nr_mapped_entries() local
1338 entry = bucket_find_exact(bucket, ref); in get_nr_mapped_entries()
1341 if (entry) in get_nr_mapped_entries()
1342 mapped_ents = entry->sg_mapped_ents; in get_nr_mapped_entries()
1383 struct dma_debug_entry *entry; in debug_dma_alloc_coherent() local
1395 entry = dma_entry_alloc(); in debug_dma_alloc_coherent()
1396 if (!entry) in debug_dma_alloc_coherent()
1399 entry->type = dma_debug_coherent; in debug_dma_alloc_coherent()
1400 entry->dev = dev; in debug_dma_alloc_coherent()
1401 entry->offset = offset_in_page(virt); in debug_dma_alloc_coherent()
1402 entry->size = size; in debug_dma_alloc_coherent()
1403 entry->dev_addr = dma_addr; in debug_dma_alloc_coherent()
1404 entry->direction = DMA_BIDIRECTIONAL; in debug_dma_alloc_coherent()
1407 entry->pfn = vmalloc_to_pfn(virt); in debug_dma_alloc_coherent()
1409 entry->pfn = page_to_pfn(virt_to_page(virt)); in debug_dma_alloc_coherent()
1411 add_dma_entry(entry); in debug_dma_alloc_coherent()
1444 struct dma_debug_entry *entry; in debug_dma_map_resource() local
1449 entry = dma_entry_alloc(); in debug_dma_map_resource()
1450 if (!entry) in debug_dma_map_resource()
1453 entry->type = dma_debug_resource; in debug_dma_map_resource()
1454 entry->dev = dev; in debug_dma_map_resource()
1455 entry->pfn = PHYS_PFN(addr); in debug_dma_map_resource()
1456 entry->offset = offset_in_page(addr); in debug_dma_map_resource()
1457 entry->size = size; in debug_dma_map_resource()
1458 entry->dev_addr = dma_addr; in debug_dma_map_resource()
1459 entry->direction = direction; in debug_dma_map_resource()
1460 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_resource()
1462 add_dma_entry(entry); in debug_dma_map_resource()