Lines Matching full:pages

29  * - Pool collects recently freed pages for reuse
31 * - doesn't track pages that are currently in use
59 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
65 * @list: Pool of free uc/wc pages for fast reuse.
67 * @npages: Number of pages in pool.
100 * @free_interval: minimum number of jiffies between freeing pages from pool.
103 * some pages to free.
104 * @small_allocation: Limit in number of pages below which an allocation counts as small.
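
Taken together, the fields quoted above describe a pool that only remembers free pages. A minimal user-space model of that shape could look like the sketch below; the field names mirror the ones documented here, but the types (a pthread mutex instead of a spinlock, a hand-rolled free list instead of page->lru) are illustrative assumptions, not the kernel definition.

    #include <pthread.h>

    /* Stand-in for struct page; only the free-list linkage matters here. */
    struct demo_page {
        struct demo_page *next;     /* the kernel threads free pages via page->lru */
    };

    /* Simplified model of ttm_page_pool: it tracks only free pages. */
    struct demo_page_pool {
        pthread_mutex_t lock;          /* protects the list and the counters */
        struct demo_page *free_list;   /* recently freed pages kept for reuse */
        unsigned npages;               /* number of pages currently in the pool */
        unsigned long free_interval;   /* min. delay between shrink passes */
        unsigned small_allocation;     /* requests up to this size refill the pool */
    };
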
164 /* Convert kb to number of pages */ in ttm_pool_store()
246 /* set memory back to wb and free the pages. */
247 static void ttm_pages_put(struct page *pages[], unsigned npages, in ttm_pages_put() argument
253 if (ttm_set_pages_array_wb(pages, npages)) in ttm_pages_put()
254 pr_err("Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
259 if (ttm_set_pages_wb(pages[i], pages_nr)) in ttm_pages_put()
260 pr_err("Failed to set %d pages to wb!\n", pages_nr); in ttm_pages_put()
262 __free_pages(pages[i], order); in ttm_pages_put()
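
The fragments above show the error-handling shape of ttm_pages_put(): flip the whole array back to write-back caching in one call for order-0 pages, fall back to a per-page call for higher orders, complain on failure, and free the pages regardless. A hedged user-space sketch of the same shape, with demo_set_array_wb()/demo_set_wb() as hypothetical stand-ins for the ttm_set_pages_* helpers:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for ttm_set_pages_array_wb()/ttm_set_pages_wb();
     * both return 0 on success and nonzero on failure. */
    static int demo_set_array_wb(void **pages, unsigned npages) { (void)pages; (void)npages; return 0; }
    static int demo_set_wb(void *page, unsigned nr) { (void)page; (void)nr; return 0; }

    /* Restore write-back caching (batched for order 0, per page otherwise),
     * report failures, then free every page. */
    static void demo_pages_put(void **pages, unsigned npages, unsigned order)
    {
        unsigned pages_nr = 1u << order;
        unsigned i;

        if (order == 0 && demo_set_array_wb(pages, npages))
            fprintf(stderr, "Failed to set %u pages to wb!\n", npages);

        for (i = 0; i < npages; ++i) {
            if (order > 0 && demo_set_wb(pages[i], pages_nr))
                fprintf(stderr, "Failed to set %u pages to wb!\n", pages_nr);
            free(pages[i]);        /* stands in for __free_pages(pages[i], order) */
        }
    }
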
274 * Free pages from pool.
277 * number of pages in one go.
279 * @pool: to free the pages from
280 * @free_all: If set to true, free all pages in the pool
317 /* remove range of pages from the pool */ in ttm_page_pool_free()
351 /* remove range of pages from the pool */ in ttm_page_pool_free()
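
The surrounding comments say the free path deliberately removes only a limited range of pages per pass, so one shrink call never frees more than NUM_PAGES_TO_ALLOC pages while holding the pool lock. A standalone sketch of that batching pattern (the names and the 16-page batch size are made up for the example):

    #include <pthread.h>
    #include <stdlib.h>

    #define DEMO_PAGES_TO_FREE 16    /* stand-in for NUM_PAGES_TO_ALLOC */

    struct demo_page { struct demo_page *next; };

    /* Pop at most one batch under the lock, free it with the lock dropped,
     * and only keep looping when the caller asked for a full drain.  This
     * mirrors how ttm_page_pool_free() avoids hogging the pool lock. */
    static void demo_pool_free(pthread_mutex_t *lock, struct demo_page **list,
                               int free_all)
    {
        struct demo_page *batch[DEMO_PAGES_TO_FREE];
        unsigned n, i;

        do {
            pthread_mutex_lock(lock);
            for (n = 0; n < DEMO_PAGES_TO_FREE && *list; ++n) {
                batch[n] = *list;
                *list = (*list)->next;
            }
            pthread_mutex_unlock(lock);

            for (i = 0; i < n; ++i)
                free(batch[i]);
        } while (free_all && n == DEMO_PAGES_TO_FREE);
    }
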
442 static int ttm_set_pages_caching(struct page **pages, in ttm_set_pages_caching() argument
449 r = ttm_set_pages_array_uc(pages, cpages); in ttm_set_pages_caching()
451 pr_err("Failed to set %d pages to uc!\n", cpages); in ttm_set_pages_caching()
454 r = ttm_set_pages_array_wc(pages, cpages); in ttm_set_pages_caching()
456 pr_err("Failed to set %d pages to wc!\n", cpages); in ttm_set_pages_caching()
465 * Free the pages that failed to change their caching state. If any pages have
466 * already changed their caching state, put them back to the
469 static void ttm_handle_caching_state_failure(struct list_head *pages, in ttm_handle_caching_state_failure() argument
474 /* Failed pages have to be freed */ in ttm_handle_caching_state_failure()
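
ttm_set_pages_caching() above is a plain dispatch on the requested caching state, and ttm_handle_caching_state_failure() is what runs when that dispatch fails: the pages that could not be converted are freed rather than pooled. A hedged sketch of the dispatch half, with demo_set_array_uc()/demo_set_array_wc() as hypothetical stand-ins:

    #include <stdio.h>

    enum demo_caching { demo_cached, demo_wc, demo_uc };  /* loosely mirrors enum ttm_caching_state */

    /* Hypothetical stand-ins for ttm_set_pages_array_uc()/_wc(). */
    static int demo_set_array_uc(void **pages, unsigned cpages) { (void)pages; (void)cpages; return 0; }
    static int demo_set_array_wc(void **pages, unsigned cpages) { (void)pages; (void)cpages; return 0; }

    /* Dispatch on the requested caching state; a nonzero return tells the
     * caller to treat the batch as failed and free it instead of pooling it. */
    static int demo_set_pages_caching(void **pages, unsigned cpages,
                                      enum demo_caching cstate)
    {
        int r = 0;

        switch (cstate) {
        case demo_uc:
            r = demo_set_array_uc(pages, cpages);
            if (r)
                fprintf(stderr, "Failed to set %u pages to uc!\n", cpages);
            break;
        case demo_wc:
            r = demo_set_array_wc(pages, cpages);
            if (r)
                fprintf(stderr, "Failed to set %u pages to wc!\n", cpages);
            break;
        default:
            break;    /* cached pages need no attribute change */
        }
        return r;
    }
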
482 * Allocate new pages with correct caching.
485 * pages returned in pages array.
487 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, in ttm_alloc_new_pages() argument
503 pr_debug("Unable to allocate table for new pages\n"); in ttm_alloc_new_pages()
513 /* store already allocated pages in the pool after in ttm_alloc_new_pages()
519 ttm_handle_caching_state_failure(pages, in ttm_alloc_new_pages()
527 list_add(&p->lru, pages); in ttm_alloc_new_pages()
544 ttm_handle_caching_state_failure(pages, in ttm_alloc_new_pages()
557 ttm_handle_caching_state_failure(pages, in ttm_alloc_new_pages()
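
ttm_alloc_new_pages() allocates the requested pages but applies the expensive caching transition in chunks (max_cpages at a time), falling back to ttm_handle_caching_state_failure() when a chunk cannot be converted. A standalone sketch of that chunking, with demo_set_caching() as a hypothetical stand-in and a made-up 16-page chunk size:

    #include <stdlib.h>

    #define DEMO_MAX_CPAGES 16    /* stand-in for max_cpages in the kernel code */

    /* Hypothetical stand-in for ttm_set_pages_caching(). */
    static int demo_set_caching(void **chunk, unsigned cpages) { (void)chunk; (void)cpages; return 0; }

    /* Allocate pages one at a time but change their caching state in chunks;
     * on failure the current chunk is freed, while pages already handed to
     * 'out' stay with the caller. */
    static int demo_alloc_new_pages(void *out[], unsigned count)
    {
        void *chunk[DEMO_MAX_CPAGES];
        unsigned done = 0, cpages = 0, i;

        while (done + cpages < count) {
            void *p = malloc(4096);    /* stands in for alloc_page() */

            if (!p)
                goto fail;
            chunk[cpages++] = p;

            /* flush the chunk when it is full or the request is complete */
            if (cpages == DEMO_MAX_CPAGES || done + cpages == count) {
                if (demo_set_caching(chunk, cpages))
                    goto fail;
                for (i = 0; i < cpages; ++i)
                    out[done + i] = chunk[i];
                done += cpages;
                cpages = 0;
            }
        }
        return 0;

    fail:
        for (i = 0; i < cpages; ++i)
            free(chunk[i]);
        return -1;
    }
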
568 * Fill the given pool if there aren't enough pages and the requested number of
569 * pages is small.
580 * If the pool doesn't have enough pages for the allocation, new pages are in ttm_page_pool_fill_locked()
589 * pages in a pool we fill the pool up first. */ in ttm_page_pool_fill_locked()
612 /* If we have any pages left put them to the pool. */ in ttm_page_pool_fill_locked()
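
The comments around ttm_page_pool_fill_locked() describe a refill policy rather than an algorithm: only small requests top the pool up, and only when the pool cannot already cover them. A decision-only sketch of that policy (allocation and locking left out, names made up):

    /* How many extra pages to allocate before serving a request, following
     * the policy described above: large requests bypass the pool, satisfied
     * requests need no refill, otherwise top the pool up by one batch
     * (NUM_PAGES_TO_ALLOC in the kernel code). */
    static unsigned demo_refill_target(unsigned pool_npages, unsigned requested,
                                       unsigned small_allocation,
                                       unsigned batch_size)
    {
        if (requested >= small_allocation)
            return 0;            /* large allocation: don't fill the pool */
        if (pool_npages >= requested)
            return 0;            /* pool already has enough pages */
        return batch_size;       /* fill the pool up first */
    }
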
625 * Allocate pages from the pool and put them on the return list.
630 struct list_head *pages, in ttm_page_pool_get_pages() argument
646 /* take all pages from the pool */ in ttm_page_pool_get_pages()
647 list_splice_init(&pool->list, pages); in ttm_page_pool_get_pages()
652 /* find the last page to include for the requested number of pages. Split in ttm_page_pool_get_pages()
667 /* Cut 'count' number of pages from the pool */ in ttm_page_pool_get_pages()
668 list_cut_position(pages, &pool->list, p); in ttm_page_pool_get_pages()
674 /* clear the pages coming from the pool if requested */ in ttm_page_pool_get_pages()
678 list_for_each_entry(page, pages, lru) { in ttm_page_pool_get_pages()
686 /* If the pool didn't have enough pages, allocate new ones. */ in ttm_page_pool_get_pages()
700 r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, in ttm_page_pool_get_pages()
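
ttm_page_pool_get_pages() either splices the whole pool list onto the caller's list (when the request is at least as big as the pool) or walks to the last page to include and cuts the list there. The kernel uses list_splice_init()/list_cut_position(); the standalone sketch below does the same with a singly linked list and returns how many pages still have to be freshly allocated.

    #include <stddef.h>

    struct demo_page { struct demo_page *next; };

    /* Move up to 'count' pages from the pool's free list onto *out and
     * return the number of pages the caller still has to allocate. */
    static unsigned demo_pool_take(struct demo_page **pool_list,
                                   unsigned *pool_npages,
                                   struct demo_page **out, unsigned count)
    {
        struct demo_page *last;
        unsigned i;

        if (count >= *pool_npages) {
            /* take all pages from the pool */
            *out = *pool_list;
            *pool_list = NULL;
            count -= *pool_npages;
            *pool_npages = 0;
            return count;        /* remainder must be freshly allocated */
        }

        if (count == 0) {
            *out = NULL;
            return 0;
        }

        /* find the last page to include, then cut the list after it */
        last = *pool_list;
        for (i = 1; i < count; ++i)
            last = last->next;

        *out = *pool_list;
        *pool_list = last->next;    /* the rest stays in the pool */
        last->next = NULL;          /* terminate the chunk handed out */
        *pool_npages -= count;
        return 0;
    }
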
707 /* Put all pages in the pages list into the correct pool to wait for reuse */
708 static void ttm_put_pages(struct page **pages, unsigned npages, int flags, in ttm_put_pages() argument
719 /* No pool for this memory type so free the pages */ in ttm_put_pages()
723 struct page *p = pages[i]; in ttm_put_pages()
727 if (!pages[i]) { in ttm_put_pages()
736 if (++p != pages[i + j]) in ttm_put_pages()
744 if (page_count(pages[i]) != 1) in ttm_put_pages()
745 pr_err("Erroneous page count. Leaking pages.\n"); in ttm_put_pages()
746 __free_pages(pages[i], order); in ttm_put_pages()
750 pages[i++] = NULL; in ttm_put_pages()
764 struct page *p = pages[i]; in ttm_put_pages()
771 if (++p != pages[i + j]) in ttm_put_pages()
777 list_add_tail(&pages[i]->lru, &huge->list); in ttm_put_pages()
780 pages[i++] = NULL; in ttm_put_pages()
799 if (pages[i]) { in ttm_put_pages()
800 if (page_count(pages[i]) != 1) in ttm_put_pages()
801 pr_err("Erroneous page count. Leaking pages.\n"); in ttm_put_pages()
802 list_add_tail(&pages[i]->lru, &pool->list); in ttm_put_pages()
803 pages[i] = NULL; in ttm_put_pages()
812 /* free at least NUM_PAGES_TO_ALLOC number of pages in ttm_put_pages()
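
When pages come back in ttm_put_pages(), the code first looks for runs of physically contiguous pages (the repeated "++p != pages[i + j]" walk above) so that whole huge-page-sized chunks can go to the huge pool as a unit; everything else goes to the per-caching-state pool, or is freed outright when no pool exists. A standalone sketch of just that contiguity check, with struct demo_page standing in for struct page:

    #include <stdbool.h>
    #include <stddef.h>

    struct demo_page { int dummy; };    /* stand-in for struct page */

    /* Does pages[i] start a run of 'run_len' physically contiguous pages?
     * Contiguity is modelled the way the kernel code checks it: consecutive
     * struct page pointers. */
    static bool demo_is_contiguous_run(struct demo_page *const pages[],
                                       size_t npages, size_t i, size_t run_len)
    {
        struct demo_page *p = pages[i];
        size_t j;

        if (i + run_len > npages || !p)
            return false;

        for (j = 1; j < run_len; ++j)
            if (++p != pages[i + j])
                return false;

        return true;
    }
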
823 * On success the pages list will hold count correctly
824 * cached pages.
826 static int ttm_get_pages(struct page **pages, unsigned npages, int flags, in ttm_get_pages() argument
838 /* No pool for cached pages */ in ttm_get_pages()
873 pages[i++] = p++; in ttm_get_pages()
888 /* Swap the pages if we detect consecutive order */ in ttm_get_pages()
889 if (i > first && pages[i - 1] == p - 1) in ttm_get_pages()
890 swap(p, pages[i - 1]); in ttm_get_pages()
892 pages[i++] = p; in ttm_get_pages()
911 pages[count++] = &p[j]; in ttm_get_pages()
924 /* Swap the pages if we detect consecutive order */ in ttm_get_pages()
925 if (count > first && pages[count - 1] == tmp - 1) in ttm_get_pages()
926 swap(tmp, pages[count - 1]); in ttm_get_pages()
927 pages[count++] = tmp; in ttm_get_pages()
931 /* If there are any pages in the list, put them back to in ttm_get_pages()
934 pr_debug("Failed to allocate extra pages for large request\n"); in ttm_get_pages()
935 ttm_put_pages(pages, count, flags, cstate); in ttm_get_pages()
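
Both allocation paths above apply the same small post-processing step when storing freshly obtained pages: if the previous array entry is the page immediately preceding the new one, the two entries are swapped before the new slot is filled (the "Swap the pages if we detect consecutive order" comment). A standalone sketch of that store step, mirroring the quoted check:

    struct demo_page { int dummy; };    /* stand-in for struct page */

    /* Store a newly obtained page at index *i, swapping it with the previous
     * entry when that entry immediately precedes it, exactly as the quoted
     * "pages[i - 1] == p - 1" check does. */
    static void demo_store_page(struct demo_page *pages[], unsigned *i,
                                unsigned first, struct demo_page *p)
    {
        if (*i > first && pages[*i - 1] == p - 1) {
            struct demo_page *tmp = pages[*i - 1];

            pages[*i - 1] = p;
            p = tmp;
        }
        pages[(*i)++] = p;
    }
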
1038 if (!ttm->pages[i]) in ttm_pool_unpopulate_helper()
1041 ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); in ttm_pool_unpopulate_helper()
1045 ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, in ttm_pool_unpopulate_helper()
1062 ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, in ttm_pool_populate()
1070 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], in ttm_pool_populate()
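
ttm_pool_populate() first grabs the whole batch with ttm_get_pages() and only then charges each page against the global accounting limit, undoing everything through the unpopulate helper if either step fails. A hedged user-space sketch of that allocate-then-account-with-unwind shape; demo_account()/demo_unaccount() are hypothetical stand-ins for ttm_mem_global_alloc_page()/ttm_mem_global_free_page():

    #include <stdlib.h>

    static int demo_account(void *page) { (void)page; return 0; }
    static void demo_unaccount(void *page) { (void)page; }

    static int demo_populate(void *pages[], unsigned num_pages)
    {
        unsigned allocated = 0, accounted = 0, i;

        /* grab the whole batch first (stands in for ttm_get_pages()) */
        for (; allocated < num_pages; ++allocated) {
            pages[allocated] = malloc(4096);
            if (!pages[allocated])
                goto error;
        }

        /* then charge every page against the global accounting limit */
        for (; accounted < num_pages; ++accounted) {
            if (demo_account(pages[accounted]))
                goto error;
        }
        return 0;

    error:
        /* mirrors the unpopulate helper: undo whatever succeeded */
        for (i = 0; i < accounted; ++i)
            demo_unaccount(pages[i]);
        for (i = 0; i < allocated; ++i) {
            free(pages[i]);
            pages[i] = NULL;
        }
        return -1;    /* the kernel returns -ENOMEM here */
    }
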
1108 struct page *p = tt->ttm.pages[i]; in ttm_populate_and_map_pages()
1112 if (++p != tt->ttm.pages[j]) in ttm_populate_and_map_pages()
1118 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], in ttm_populate_and_map_pages()
1145 struct page *p = tt->ttm.pages[i]; in ttm_unmap_and_unpopulate_pages()
1148 if (!tt->dma_address[i] || !tt->ttm.pages[i]) { in ttm_unmap_and_unpopulate_pages()
1154 if (++p != tt->ttm.pages[j]) in ttm_unmap_and_unpopulate_pages()
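
ttm_populate_and_map_pages() and its inverse exploit the ordering work above: instead of calling dma_map_page() once per page, they extend the current chunk while the struct page pointers stay consecutive, map the whole physically contiguous range in one call, and then give every page in the chunk its offset within that mapping. A standalone sketch of the coalescing walk, with map_range() as a hypothetical callback standing in for dma_map_page() and an assumed 4 KiB page size:

    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12    /* assumed 4 KiB pages */

    struct demo_page { int dummy; };    /* stand-in for struct page */

    /* Coalesce physically contiguous pages and map each chunk with a single
     * call, filling in a per-page address the way the kernel code fills
     * tt->dma_address[].  Returns 0 on success, -1 on a mapping failure
     * (the caller would unwind earlier mappings, as the kernel code does). */
    static int demo_map_coalesced(struct demo_page *pages[], size_t num_pages,
                                  uint64_t dma_address[],
                                  uint64_t (*map_range)(struct demo_page *first,
                                                        size_t bytes))
    {
        size_t i = 0, j;

        while (i < num_pages) {
            struct demo_page *p = pages[i];
            size_t chunk = 1;
            uint64_t addr;

            /* same "++p != pages[j]" contiguity walk as the quoted lines */
            for (j = i + 1; j < num_pages; ++j) {
                if (++p != pages[j])
                    break;
                ++chunk;
            }

            addr = map_range(pages[i], chunk << DEMO_PAGE_SHIFT);
            if (!addr)
                return -1;

            for (j = 0; j < chunk; ++j)
                dma_address[i + j] = addr + (j << DEMO_PAGE_SHIFT);

            i += chunk;
        }
        return 0;
    }
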
1173 char *h[] = {"pool", "refills", "pages freed", "size"}; in ttm_page_alloc_debugfs()