Lines matching refs: dma_map
297 struct xsk_dma_map *dma_map; in xp_find_dma_map() local
299 list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) { in xp_find_dma_map()
300 if (dma_map->netdev == pool->netdev) in xp_find_dma_map()
301 return dma_map; in xp_find_dma_map()
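The first group of matches appears to come from xp_find_dma_map() in the AF_XDP buffer-pool code (net/xdp/xsk_buff_pool.c): it walks the umem's xsk_dma_list looking for the mapping that belongs to this pool's netdev. A minimal sketch pieced together from the matched lines; the NULL fall-through return is an assumption:

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        /* A umem may be DMA-mapped for several devices; reuse the
         * mapping that was created for this pool's netdev, if any. */
        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL; /* assumed: no existing mapping for this netdev */
}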
310 struct xsk_dma_map *dma_map; in xp_create_dma_map() local
312 dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL); in xp_create_dma_map()
313 if (!dma_map) in xp_create_dma_map()
316 dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL); in xp_create_dma_map()
317 if (!dma_map->dma_pages) { in xp_create_dma_map()
318 kfree(dma_map); in xp_create_dma_map()
322 dma_map->netdev = netdev; in xp_create_dma_map()
323 dma_map->dev = dev; in xp_create_dma_map()
324 dma_map->dma_need_sync = false; in xp_create_dma_map()
325 dma_map->dma_pages_cnt = nr_pages; in xp_create_dma_map()
326 refcount_set(&dma_map->users, 1); in xp_create_dma_map()
327 list_add(&dma_map->list, &umem->xsk_dma_list); in xp_create_dma_map()
328 return dma_map; in xp_create_dma_map()
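xp_create_dma_map() shows the allocation side: a zeroed xsk_dma_map, a dma_pages array with one slot per umem page, field initialization, an initial refcount of one, and insertion into the umem's xsk_dma_list. A hedged reconstruction; the parameter types and the NULL returns on allocation failure are assumptions:

static struct xsk_dma_map *xp_create_dma_map(struct device *dev,
                                             struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
        if (!dma_map)
                return NULL; /* assumed error path */

        /* One dma_addr_t per umem page. */
        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL; /* assumed error path */
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1); /* creator holds the first reference */
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}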
331 static void xp_destroy_dma_map(struct xsk_dma_map *dma_map) in xp_destroy_dma_map() argument
333 list_del(&dma_map->list); in xp_destroy_dma_map()
334 kvfree(dma_map->dma_pages); in xp_destroy_dma_map()
335 kfree(dma_map); in xp_destroy_dma_map()
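xp_destroy_dma_map() is the inverse: unlink the mapping from the list, then free the page array and the struct. All three statements are visible in the matches, so only the surrounding braces are inferred:

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
        list_del(&dma_map->list);   /* remove from umem->xsk_dma_list */
        kvfree(dma_map->dma_pages); /* pairs with kvcalloc() at creation */
        kfree(dma_map);
}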
338 static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs) in __xp_dma_unmap() argument
343 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in __xp_dma_unmap()
344 dma = &dma_map->dma_pages[i]; in __xp_dma_unmap()
347 dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE, in __xp_dma_unmap()
353 xp_destroy_dma_map(dma_map); in __xp_dma_unmap()
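__xp_dma_unmap() walks every recorded DMA address, unmaps the page behind it, and then destroys the bookkeeping via xp_destroy_dma_map(). The loop skeleton below follows the matches; the DMA direction and the clearing of the contiguity bit before unmapping are assumptions:

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
                        /* Assumed: strip the contiguity flag so the raw
                         * address is handed back to the DMA API. */
                        *dma &= ~XSK_NEXT_PG_CONTIG_MASK;
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}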
358 struct xsk_dma_map *dma_map; in xp_dma_unmap() local
363 dma_map = xp_find_dma_map(pool); in xp_dma_unmap()
364 if (!dma_map) { in xp_dma_unmap()
369 if (!refcount_dec_and_test(&dma_map->users)) in xp_dma_unmap()
372 __xp_dma_unmap(dma_map, attrs); in xp_dma_unmap()
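xp_dma_unmap() is the refcounted public teardown: look up the mapping for this pool, and only when the last user drops its reference is __xp_dma_unmap() actually called. A sketch; the warning text and any reset of the pool's own cached DMA fields afterwards are assumptions:

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        dma_map = xp_find_dma_map(pool);
        if (!dma_map) {
                WARN(1, "Could not find dma_map for device"); /* assumed diagnostic */
                return;
        }

        /* Other pools sharing the umem may still use the mapping;
         * only the final reference tears it down. */
        if (!refcount_dec_and_test(&dma_map->users))
                return;

        __xp_dma_unmap(dma_map, attrs);
        /* assumed: the pool's cached dma_pages/dev fields are reset here */
}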
380 static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map) in xp_check_dma_contiguity() argument
384 for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) { in xp_check_dma_contiguity()
385 if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1]) in xp_check_dma_contiguity()
386 dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
388 dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
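xp_check_dma_contiguity() tags each DMA address whose successor page is physically adjacent, so a buffer that straddles a page boundary can later be treated as one contiguous region. Both branches appear in the matches; only the else joining them is inferred:

static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}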
392 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map) in xp_init_dma_info() argument
400 xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); in xp_init_dma_info()
404 pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL); in xp_init_dma_info()
408 pool->dev = dma_map->dev; in xp_init_dma_info()
409 pool->dma_pages_cnt = dma_map->dma_pages_cnt; in xp_init_dma_info()
410 pool->dma_need_sync = dma_map->dma_need_sync; in xp_init_dma_info()
411 memcpy(pool->dma_pages, dma_map->dma_pages, in xp_init_dma_info()
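xp_init_dma_info() copies the shared mapping into the pool: per-buffer DMA info first, then the pool's own dma_pages array plus the dev, count and sync flag. The sketch below fills in the pieces the matches do not show (the loop over pool->heads, the -ENOMEM return and the memcpy length), so treat those as assumptions:

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        u32 i;

        /* Assumed loop: give every preallocated xdp_buff_xsk its DMA address. */
        for (i = 0; i < pool->heads_cnt; i++) {
                struct xdp_buff_xsk *xskb = &pool->heads[i];

                xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
        }

        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM; /* assumed error code */

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));

        return 0;
}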
420 struct xsk_dma_map *dma_map; in xp_dma_map() local
425 dma_map = xp_find_dma_map(pool); in xp_dma_map()
426 if (dma_map) { in xp_dma_map()
427 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
431 refcount_inc(&dma_map->users); in xp_dma_map()
435 dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem); in xp_dma_map()
436 if (!dma_map) in xp_dma_map()
439 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in xp_dma_map()
443 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
447 dma_map->dma_need_sync = true; in xp_dma_map()
448 dma_map->dma_pages[i] = dma; in xp_dma_map()
452 xp_check_dma_contiguity(dma_map); in xp_dma_map()
454 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
456 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
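xp_dma_map() ties it all together: reuse an existing mapping for this netdev if one is found (copy its info into the pool and bump the refcount), otherwise create a fresh xsk_dma_map, DMA-map every umem page, note whether explicit syncing is needed, mark contiguous neighbours, and publish the result via xp_init_dma_info(). An end-to-end sketch; the exact signature, DMA direction, error codes and any unaligned-mode guard around the contiguity check are assumptions:

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        int err;
        u32 i;

        /* Fast path: another pool already mapped this umem for the netdev. */
        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                err = xp_init_dma_info(pool, dma_map);
                if (err)
                        return err;

                refcount_inc(&dma_map->users);
                return 0;
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM; /* assumed error code */

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma)) {
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM; /* assumed error code */
                }
                if (dma_need_sync(dev, dma))
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        xp_check_dma_contiguity(dma_map);

        err = xp_init_dma_info(pool, dma_map);
        if (err)
                __xp_dma_unmap(dma_map, attrs);

        return err;
}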