Lines Matching +full:charge +full:current +full:limit +full:mapping (drivers/xen/grant-table.c)
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
49 #include <linux/dma-mapping.h>
57 #include <xen/hvc-console.h>
58 #include <xen/swiotlb-xen.h>
63 #include <xen/mem-reservation.h>
99 * Mapping a list of frames for storing grant entries. Frames parameter
166 /* This can be used as an l-value */
178 ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { in get_free_entries()
184 gnttab_free_count -= count; in get_free_entries()
185 while (count-- > 1) in get_free_entries()
203 next = callback->next; in do_free_callbacks()
204 if (gnttab_free_count >= callback->count) { in do_free_callbacks()
205 callback->next = NULL; in do_free_callbacks()
206 callback->fn(callback->arg); in do_free_callbacks()
208 callback->next = gnttab_free_callback_list; in do_free_callbacks()
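For reference, the dispatch loop these fragments belong to drains the pending-callback list and re-queues any caller whose request cannot yet be satisfied; a reconstruction consistent with the lines above (the exact body may vary across kernel versions):

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			/* Enough free entries: unlink and invoke. */
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			/* Still short: push back onto the list. */
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}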
235 * 1. Write ent->domid.
236 * 2. Write ent->frame:
238 * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
241 * 4. Write ent->flags, inc. valid type.
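The step elided between 2 and 4 above is a write memory barrier, so the entry only becomes valid once domid and frame are already visible. The v1 update path follows that ordering roughly like this (a sketch matching the v1 shared-entry layout):

static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v1[ref].domid = domid;	/* 1. Write ent->domid. */
	gnttab_shared.v1[ref].frame = frame;	/* 2. Write ent->frame. */
	wmb();					/* 3. Barrier before validity. */
	gnttab_shared.v1[ref].flags = flags;	/* 4. Write ent->flags. */
}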
262 * Public grant-issuing interface functions
267 gnttab_interface->update_entry(ref, domid, frame, in gnttab_grant_foreign_access_ref()
279 return -ENOSPC; in gnttab_grant_foreign_access()
299 return gnttab_interface->query_foreign_access(ref); in gnttab_query_foreign_access()
344 return gnttab_interface->end_foreign_access_ref(ref, readonly); in _gnttab_end_foreign_access_ref()
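A typical frontend use of this issuing interface, sketched with the signatures of this kernel era (the helper name and backend_id are hypothetical; gnttab_end_foreign_access() here still takes the readonly flag and a kernel address, or 0 to keep the page):

#include <xen/grant_table.h>
#include <xen/page.h>

/* Hypothetical: share one page read-only with a backend domain. */
static int share_page_with_backend(domid_t backend_id, void *vaddr,
				   grant_ref_t *ref_out)
{
	int ref = gnttab_grant_foreign_access(backend_id,
					      virt_to_gfn(vaddr), 1);
	if (ref < 0)
		return ref;		/* -ENOSPC: no free entries */

	*ref_out = ref;
	return 0;
}

/* Later: gnttab_end_foreign_access(ref, 1, (unsigned long)vaddr); */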
374 while (nr--) { in gnttab_handle_deferred()
381 list_del(&entry->list); in gnttab_handle_deferred()
383 if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { in gnttab_handle_deferred()
384 put_free_entry(entry->ref); in gnttab_handle_deferred()
385 if (entry->page) { in gnttab_handle_deferred()
387 entry->ref, page_to_pfn(entry->page)); in gnttab_handle_deferred()
388 put_page(entry->page); in gnttab_handle_deferred()
390 pr_info("freeing g.e. %#x\n", entry->ref); in gnttab_handle_deferred()
394 if (!--entry->warn_delay) in gnttab_handle_deferred()
395 pr_info("g.e. %#x still pending\n", entry->ref); in gnttab_handle_deferred()
401 list_add_tail(&entry->list, &deferred_list); in gnttab_handle_deferred()
421 entry->ref = ref; in gnttab_add_deferred()
422 entry->ro = readonly; in gnttab_add_deferred()
423 entry->page = page; in gnttab_add_deferred()
424 entry->warn_delay = 60; in gnttab_add_deferred()
426 list_add_tail(&entry->list, &deferred_list); in gnttab_add_deferred()
435 what, ref, page ? page_to_pfn(page) : -1); in gnttab_add_deferred()
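End-of-access cannot complete while the remote domain still holds a mapping, so the reference is parked on deferred_list and retried from a timer; warn_delay = 60 yields roughly one warning per minute for a stuck entry. The caller-facing wrapper plausibly reads (hedged reconstruction of this era's code):

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);	/* revoked: the ref can be reused */
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,	/* still mapped */
				    page ? virt_to_page(page) : NULL);
}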
457 return -ENOSPC; in gnttab_grant_foreign_transfer()
467 gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); in gnttab_grant_foreign_transfer_ref()
535 return gnttab_interface->end_foreign_transfer_ref(ref); in gnttab_end_foreign_transfer_ref()
579 return -ENOSPC; in gnttab_alloc_grant_references()
597 return -ENOSPC; in gnttab_claim_grant_reference()
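Frontends usually pre-allocate a batch of references and claim them one at a time, which keeps the -ENOSPC handling in one place. A minimal sketch (backend_id and pages[] are hypothetical caller state):

static int grant_batch(domid_t backend_id, struct page **pages, u16 n)
{
	grant_ref_t head;
	int ref, i, err;

	err = gnttab_alloc_grant_references(n, &head);
	if (err < 0)
		return err;		/* -ENOSPC: table exhausted */

	for (i = 0; i < n; i++) {
		ref = gnttab_claim_grant_reference(&head);
		if (ref < 0)
			break;
		/* Refs stay granted; revoke later via
		 * gnttab_end_foreign_access_ref(). */
		gnttab_grant_foreign_access_ref(ref, backend_id,
						xen_page_to_gfn(pages[i]), 0);
	}

	gnttab_free_grant_references(head);	/* return unclaimed refs */
	return 0;
}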
624 cb = cb->next; in gnttab_request_free_callback()
627 callback->fn = fn; in gnttab_request_free_callback()
628 callback->arg = arg; in gnttab_request_free_callback()
629 callback->count = count; in gnttab_request_free_callback()
630 callback->next = gnttab_free_callback_list; in gnttab_request_free_callback()
644 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { in gnttab_cancel_free_callback()
646 *pcb = callback->next; in gnttab_cancel_free_callback()
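When get_free_entries() would fail, a driver can instead register for notification once enough references are free; gnttab_cancel_free_callback() unlinks a pending request via the *pcb walk shown above. A sketch, assuming the callback may run in atomic context and should therefore only schedule work (all names hypothetical):

static struct gnttab_free_callback gref_cb;
static struct work_struct retry_work;

static void grefs_available(void *arg)
{
	schedule_work(arg);	/* defer real allocation to process context */
}

static void request_gref_notification(void)
{
	/* Call back once at least 16 grant references are free. */
	gnttab_request_free_callback(&gref_cb, grefs_available,
				     &retry_work, 16);
}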
656 return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) / in gnttab_frames()
666 grefs_per_frame = gnttab_interface->grefs_per_grant_frame; in grow_gnttab_list()
681 i < grefs_per_frame * new_nr_grant_frames - 1; i++) in grow_gnttab_list()
695 while (i-- > nr_glist_frames) in grow_gnttab_list()
697 return -ENOMEM; in grow_gnttab_list()
737 return -EINVAL; in gnttab_setup_auto_xlat_frames()
743 return -ENOMEM; in gnttab_setup_auto_xlat_frames()
748 return -ENOMEM; in gnttab_setup_auto_xlat_frames()
784 return -ENOMEM; in gnttab_pages_set_private()
796 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
819 cache->pages = NULL; in cache_init()
824 return !cache->pages; in cache_empty()
831 page = cache->pages; in cache_deq()
832 cache->pages = page->zone_device_data; in cache_deq()
839 page->zone_device_data = cache->pages; in cache_enq()
840 cache->pages = page; in cache_enq()
845 INIT_LIST_HEAD(&cache->pages); in cache_init()
850 return list_empty(&cache->pages); in cache_empty()
857 page = list_first_entry(&cache->pages, struct page, lru); in cache_deq()
858 list_del(&page->lru); in cache_deq()
865 list_add(&page->lru, &cache->pages); in cache_enq()
871 spin_lock_init(&cache->lock); in gnttab_page_cache_init()
873 cache->num_pages = 0; in gnttab_page_cache_init()
881 spin_lock_irqsave(&cache->lock, flags); in gnttab_page_cache_get()
884 spin_unlock_irqrestore(&cache->lock, flags); in gnttab_page_cache_get()
889 cache->num_pages--; in gnttab_page_cache_get()
891 spin_unlock_irqrestore(&cache->lock, flags); in gnttab_page_cache_get()
903 spin_lock_irqsave(&cache->lock, flags); in gnttab_page_cache_put()
907 cache->num_pages += num; in gnttab_page_cache_put()
909 spin_unlock_irqrestore(&cache->lock, flags); in gnttab_page_cache_put()
919 spin_lock_irqsave(&cache->lock, flags); in gnttab_page_cache_shrink()
921 while (cache->num_pages > num) { in gnttab_page_cache_shrink()
923 cache->num_pages--; in gnttab_page_cache_shrink()
925 spin_unlock_irqrestore(&cache->lock, flags); in gnttab_page_cache_shrink()
928 spin_lock_irqsave(&cache->lock, flags); in gnttab_page_cache_shrink()
932 spin_unlock_irqrestore(&cache->lock, flags); in gnttab_page_cache_shrink()
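Two cache_* implementations appear above because the cache threads free pages either through page->zone_device_data or through page->lru, depending on the kernel configuration that supplies backend pages; the exported API is identical either way. A usage sketch (function name hypothetical):

static int use_cached_page(struct gnttab_page_cache *cache)
{
	struct page *page;

	/* On an empty cache this falls back to a fresh allocation. */
	if (gnttab_page_cache_get(cache, &page))
		return -ENOMEM;

	/* ... grant-map into the page, use it, unmap ... */

	gnttab_page_cache_put(cache, &page, 1);	/* recycle, don't free */
	gnttab_page_cache_shrink(cache, 16);	/* keep at most 16 spares */
	return 0;
}

The cache itself is set up once beforehand with gnttab_page_cache_init().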
955 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
968 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
977 size = args->nr_pages << PAGE_SHIFT; in gnttab_dma_alloc_pages()
978 if (args->coherent) in gnttab_dma_alloc_pages()
979 args->vaddr = dma_alloc_coherent(args->dev, size, in gnttab_dma_alloc_pages()
980 &args->dev_bus_addr, in gnttab_dma_alloc_pages()
983 args->vaddr = dma_alloc_wc(args->dev, size, in gnttab_dma_alloc_pages()
984 &args->dev_bus_addr, in gnttab_dma_alloc_pages()
986 if (!args->vaddr) { in gnttab_dma_alloc_pages()
988 return -ENOMEM; in gnttab_dma_alloc_pages()
991 start_pfn = __phys_to_pfn(args->dev_bus_addr); in gnttab_dma_alloc_pages()
992 for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages; in gnttab_dma_alloc_pages()
996 args->pages[i] = page; in gnttab_dma_alloc_pages()
997 args->frames[i] = xen_page_to_gfn(page); in gnttab_dma_alloc_pages()
1001 xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages); in gnttab_dma_alloc_pages()
1003 ret = xenmem_reservation_decrease(args->nr_pages, args->frames); in gnttab_dma_alloc_pages()
1004 if (ret != args->nr_pages) { in gnttab_dma_alloc_pages()
1006 ret = -EFAULT; in gnttab_dma_alloc_pages()
1010 ret = gnttab_pages_set_private(args->nr_pages, args->pages); in gnttab_dma_alloc_pages()
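gnttab_dma_alloc_pages() replaces ballooned-out frames with freshly DMA-allocated ones, so a backend gets buffers that are both grant-mappable and usable for device DMA. Caller-side sketch (dev, pages[], and frames[] are hypothetical, caller-sized state):

static int alloc_dma_grant_buffer(struct device *dev, int nr,
				  struct page **pages, xen_pfn_t *frames,
				  struct gnttab_dma_alloc_args *args)
{
	args->dev	= dev;
	args->coherent	= true;		/* false selects write-combining */
	args->nr_pages	= nr;
	args->pages	= pages;
	args->frames	= frames;

	/* On success, args->vaddr and args->dev_bus_addr describe the
	 * buffer; release it with gnttab_dma_free_pages() on the same
	 * args. */
	return gnttab_dma_alloc_pages(args);
}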
1023 * gnttab_dma_free_pages - free DMAable pages
1031 gnttab_pages_clear_private(args->nr_pages, args->pages); in gnttab_dma_free_pages()
1033 for (i = 0; i < args->nr_pages; i++) in gnttab_dma_free_pages()
1034 args->frames[i] = page_to_xen_pfn(args->pages[i]); in gnttab_dma_free_pages()
1036 ret = xenmem_reservation_increase(args->nr_pages, args->frames); in gnttab_dma_free_pages()
1037 if (ret != args->nr_pages) { in gnttab_dma_free_pages()
1039 ret = -EFAULT; in gnttab_dma_free_pages()
1044 xenmem_reservation_va_mapping_update(args->nr_pages, args->pages, in gnttab_dma_free_pages()
1045 args->frames); in gnttab_dma_free_pages()
1047 size = args->nr_pages << PAGE_SHIFT; in gnttab_dma_free_pages()
1048 if (args->coherent) in gnttab_dma_free_pages()
1049 dma_free_coherent(args->dev, size, in gnttab_dma_free_pages()
1050 args->vaddr, args->dev_bus_addr); in gnttab_dma_free_pages()
1052 dma_free_wc(args->dev, size, in gnttab_dma_free_pages()
1053 args->vaddr, args->dev_bus_addr); in gnttab_dma_free_pages()
1074 pr_err("%s: %s eagain grant\n", func, current->comm); in gnttab_retry_eagain_gop()
1086 if (op->status == GNTST_eagain) in gnttab_batch_map()
1088 &op->status, __func__); in gnttab_batch_map()
1099 if (op->status == GNTST_eagain) in gnttab_batch_copy()
1101 &op->status, __func__); in gnttab_batch_copy()
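gnttab_batch_map() and gnttab_batch_copy() issue the hypercall and transparently re-submit any op that comes back GNTST_eagain, using the delay loop in gnttab_retry_eagain_gop(). A single-op copy sketch (peer domain and remote_ref hypothetical):

static int copy_from_peer(domid_t peer, grant_ref_t remote_ref,
			  struct page *local_page)
{
	struct gnttab_copy op = {
		.source.u.ref	= remote_ref,
		.source.domid	= peer,
		.dest.u.gmfn	= xen_page_to_gfn(local_page),
		.dest.domid	= DOMID_SELF,
		.len		= XEN_PAGE_SIZE,
		.flags		= GNTCOPY_source_gref,
	};

	gnttab_batch_copy(&op, 1);	/* retries GNTST_eagain internally */
	return op.status == GNTST_okay ? 0 : -EIO;
}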
1115 len = min_t(unsigned int, PAGE_SIZE - offset, len); in gnttab_foreach_grant_in_range()
1121 glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len); in gnttab_foreach_grant_in_range()
1126 len -= glen; in gnttab_foreach_grant_in_range()
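The two min_t clamps split a byte range of a kernel page into grant-sized chunks: len is first limited to what remains of the PAGE_SIZE page, then each step to what remains of the current XEN_PAGE_SIZE (4 KiB) grant frame, which matters when PAGE_SIZE exceeds Xen's granularity (e.g. 64 KiB arm64 pages). Callback sketch counting the chunks a range needs:

static void count_grant(unsigned long gfn, unsigned int offset,
			unsigned int len, void *data)
{
	(*(unsigned int *)data)++;	/* one call per grant-sized chunk */
}

static unsigned int grants_needed(struct page *page, unsigned int offset,
				  unsigned int len)
{
	unsigned int n = 0;

	gnttab_foreach_grant_in_range(page, offset, len, count_grant, &n);
	return n;
}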
1171 foreign->domid = map_ops[i].dom; in gnttab_map_refs()
1172 foreign->gref = map_ops[i].ref; in gnttab_map_refs()
1177 pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n"); in gnttab_map_refs()
1186 i--; in gnttab_map_refs()
1226 if (unmap_data->age != UINT_MAX) in gnttab_unmap_work()
1227 unmap_data->age++; in gnttab_unmap_work()
1236 for (pc = 0; pc < item->count; pc++) { in __gnttab_unmap_refs_async()
1237 if (page_count(item->pages[pc]) > 1) { in __gnttab_unmap_refs_async()
1238 unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1); in __gnttab_unmap_refs_async()
1239 schedule_delayed_work(&item->gnttab_work, in __gnttab_unmap_refs_async()
1245 ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops, in __gnttab_unmap_refs_async()
1246 item->pages, item->count); in __gnttab_unmap_refs_async()
1247 item->done(ret, item); in __gnttab_unmap_refs_async()
1252 INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work); in gnttab_unmap_refs_async()
1253 item->age = 0; in gnttab_unmap_refs_async()
1262 struct unmap_refs_callback_data *d = data->data; in unmap_refs_callback()
1264 d->result = result; in unmap_refs_callback()
1265 complete(&d->completion); in unmap_refs_callback()
1273 item->data = &data; in gnttab_unmap_refs_sync()
1274 item->done = &unmap_refs_callback; in gnttab_unmap_refs_sync()
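gnttab_unmap_refs_sync() wraps the async machinery with a completion: while any target page still has extra references (page_count() > 1), the unmap is re-queued as delayed work with a delay scaled by item->age. Caller sketch (note the struct really is spelled gntab_unmap_queue_data; unmap_ops[] and pages[] are hypothetical, prepared with gnttab_set_unmap_op()):

static int unmap_grants(struct gnttab_unmap_grant_ref *unmap_ops,
			struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data unmap_data = {
		.unmap_ops	= unmap_ops,
		.kunmap_ops	= NULL,
		.pages		= pages,
		.count		= count,
	};

	return gnttab_unmap_refs_sync(&unmap_data);	/* blocks until done */
}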
1318 return -ENOMEM; in gnttab_map_frames_v2()
1326 if (rc == -ENOSYS) { in gnttab_map_frames_v2()
1328 return -ENOSYS; in gnttab_map_frames_v2()
1380 } while (i-- > start_idx); in gnttab_map()
1390 return -ENOMEM; in gnttab_map()
1397 if (rc == -ENOSYS) { in gnttab_map()
1399 return -ENOSYS; in gnttab_map()
1404 rc = gnttab_interface->map_frames(frames, nr_gframes); in gnttab_map()
1472 gnttab_interface->version); in gnttab_request_version()
1481 return -ENOSYS; in gnttab_setup()
1487 return -ENOMEM; in gnttab_setup()
1490 return gnttab_map(0, nr_grant_frames - 1); in gnttab_setup()
1502 gnttab_interface->unmap_frames(); in gnttab_suspend()
1512 extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) / in gnttab_expand()
1513 gnttab_interface->grefs_per_grant_frame); in gnttab_expand()
1515 pr_warn_ratelimited("xen/grant-table: max_grant_frames reached" in gnttab_expand()
1516 " cur=%u extra=%u limit=%u" in gnttab_expand()
1520 return -ENOSPC; in gnttab_expand()
1523 rc = gnttab_map(cur, cur + extra - 1); in gnttab_expand()
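The extra-frame computation at lines 1512-1513 is plain ceiling division; an equivalent spelling (not necessarily the form used in any given kernel) would be:

extra = DIV_ROUND_UP(req_entries, gnttab_interface->grefs_per_grant_frame);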
1543 * grant reference free list on the current hypervisor. in gnttab_init()
1546 gnttab_interface->grefs_per_grant_frame / RPP); in gnttab_init()
1552 return -ENOMEM; in gnttab_init()
1558 ret = -ENOMEM; in gnttab_init()
1569 ret = -ENODEV; in gnttab_init()
1574 gnttab_interface->grefs_per_grant_frame; in gnttab_init()
1576 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) in gnttab_init()
1579 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; in gnttab_init()
1580 gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; in gnttab_init()
1587 for (i--; i >= 0; i--) in gnttab_init()
1597 return -ENODEV; in __gnttab_init()
1599 /* Delay grant-table initialization in the PV on HVM case */ in __gnttab_init()