/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>

#include "ttm_memory.h"

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

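/**
 * struct ttm_mem_zone - accounting state for one memory zone.
 * @kobj: sysfs object exposing the limits below.
 * @glob: backpointer to the global accounting state.
 * @name: zone name ("kernel", "highmem" or "dma32").
 * @zone_mem: total memory managed by this zone, in bytes.
 * @emer_mem: hard limit; only CAP_SYS_ADMIN may reserve past max_mem,
 * up to this value.
 * @max_mem: soft limit for ordinary reservations.
 * @swap_limit: usage above this level triggers the swapout worker.
 * @used_mem: currently accounted memory, in bytes.
 *
 * All limits and counters are protected by glob->lock.
 */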
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

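/*
 * All zone attributes are exposed in KiB: the show path above shifts
 * the byte counts right by 10, and the store path below shifts the
 * written value left by 10 and clamps it to the zone size.
 */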
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

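/*
 * Unlike the per-zone limits, lower_mem_limit is kept in pages rather
 * than bytes; the show and store paths convert to and from KiB using
 * PAGE_SHIFT - 10.
 */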
static ssize_t ttm_mem_global_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KiB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KiB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};

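/*
 * Pick the swap target for each zone: the background worker swaps
 * down to swap_limit, CAP_SYS_ADMIN callers only need to get below
 * emer_mem, everyone else below max_mem. @extra is the size of the
 * pending allocation (plus headroom); if it alone exceeds the target,
 * the target drops to zero and the zone is drained completely.
 */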
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		spin_lock(&glob->lock);
		if (unlikely(ret <= 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

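/*
 * The default zone limits are derived from the zone size: ordinary
 * reservations may use up to 1/2 of the zone, privileged ones up to
 * 3/4, and swapping starts at 3/8 (max_mem minus 1/8 of the zone).
 * For example, with 8 GiB of low memory: max_mem = 4 GiB,
 * emer_mem = 6 GiB, swap_limit = 3 GiB.
 */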
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/* No special dma32 zone needed. */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

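/**
 * ttm_mem_global_init - initialize the TTM memory accounting.
 * @glob: the accounting state to initialize.
 * @dev: device the "memory_accounting" sysfs directory hangs off.
 *
 * Sets up the swapout workqueue and the kernel zone, plus either the
 * highmem zone (CONFIG_HIGHMEM) or the dma32 zone. On failure, any
 * partially created state is torn down via ttm_mem_global_release().
 */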
int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);

	ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
				   &dev->kobj, "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	spin_lock(&glob->lock);
	/* set it to 0 by default to keep the original OOM behavior */
	glob->lower_mem_limit = 0;
	spin_unlock(&glob->lock);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

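/**
 * ttm_mem_global_release - tear down the TTM memory accounting.
 * @glob: the accounting state to release.
 *
 * Flushes and destroys the swapout workqueue, then drops the zone and
 * global kobjects. The zone structures themselves are freed by
 * ttm_mem_zone_kobj_release() once their kobjects go away.
 */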
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	struct ttm_mem_zone *zone;
	unsigned int i;

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
	memset(glob, 0, sizeof(*glob));
}

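/*
 * Queue the swapout worker if any zone has crossed its swap_limit.
 * Called after sysfs limit updates and after reservation attempts.
 */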
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

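/*
 * Subtract @amount from the usage counters. A NULL @single_zone means
 * the memory was accounted in every zone; otherwise only @single_zone
 * is adjusted.
 */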
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether the memory that would remain free after this
 * allocation - free swap space plus available system memory, minus
 * @num_pages - falls below the configured lower_mem_limit. Returns
 * true if it does, meaning the allocation should be refused.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	int64_t available;

	/* We allow over commit during suspend */
	if (ctx->force_alloc)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}

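/*
 * Two-phase accounting under the global spinlock: first check every
 * affected zone against its limit, then, if all have room and
 * @reserve is set, commit the new usage. The check uses the usage
 * *before* adding @amount, so a single reservation may overshoot the
 * limit once.
 */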
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

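/*
 * Reserve with retries: on failure, synchronously swap out with some
 * headroom (the requested size plus a quarter of it plus 16 bytes)
 * and try again, up to TTM_MEMORY_ALLOC_RETRIES times, unless
 * ctx->no_wait_gpu is set.
 */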
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * the kernel zone.
	 */
	return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

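/*
 * Round up to a power of two below or at PAGE_SIZE, and to a multiple
 * of PAGE_SIZE above it. E.g. ttm_round_pot(100) == 128 and, with
 * 4 KiB pages, ttm_round_pot(5000) == 8192.
 */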
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);