Lines Matching full:end

36 .end = IO_SPACE_LIMIT,
44 .end = -1,
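
Lines 36 and 44 are from the two root resources declared near the top of kernel/resource.c: ioport_resource caps the I/O port tree at IO_SPACE_LIMIT, while iomem_resource uses -1 (all bits set in resource_size_t) so it spans the whole physical address space. Quoted from memory, so treat the details as approximate, the initializers look roughly like this:

struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,	/* line 36: top of the port space */
	.flags	= IORESOURCE_IO,
};

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,			/* line 44: all-ones, i.e. no upper bound */
	.flags	= IORESOURCE_MEM,
};
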
108 unsigned long long start, end; in r_show() local
109 int width = root->end < 0x10000 ? 4 : 8; in r_show()
118 end = r->end; in r_show()
120 start = end = 0; in r_show()
126 width, end, in r_show()
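
Lines 108-126 come from r_show(), the seq_file show routine behind /proc/ioports and /proc/iomem. Line 109 picks the print width: 4 hex digits are enough for the 64 KiB port space, 8 otherwise; line 120 zeroes both ends when the caller is not allowed to see the real range. A minimal sketch of that logic (the helper name, the permission flag, and the exact seq_printf format are approximations, not the kernel's literal code):

static void show_one(struct seq_file *m, struct resource *root,
		     struct resource *r, bool allowed)
{
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;	/* ports fit in 0xffff */

	if (allowed) {
		start = r->start;
		end = r->end;				/* line 118 */
	} else {
		start = end = 0;			/* line 120: hide the range */
	}
	seq_printf(m, "%0*llx-%0*llx : %s\n",
		   width, start, width, end, r->name ? r->name : "<BAD>");
}
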
187 resource_size_t end = new->end; in __request_resource() local
190 if (end < start) in __request_resource()
194 if (end > root->end) in __request_resource()
199 if (!tmp || tmp->start > end) { in __request_resource()
206 if (tmp->end < start) in __request_resource()
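
Lines 187-206 are the heart of __request_resource(): reject a degenerate or out-of-bounds request, then walk the parent's sorted child list until either a free slot or a conflicting sibling is found. A reconstruction of how the matched checks fit together (simplified from memory, so not line-for-line identical to the kernel source):

static struct resource *__request_resource(struct resource *root,
					   struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;		/* line 187 */
	struct resource *tmp, **p;

	if (end < start)			/* line 190: inverted/empty range */
		return root;
	if (start < root->start || end > root->end)
		return root;			/* line 194: does not fit in the root */

	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {	/* line 199: free gap found */
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;		/* success */
		}
		p = &tmp->sibling;
		if (tmp->end < start)		/* line 206: no overlap, keep walking */
			continue;
		return tmp;			/* overlapping sibling = conflict */
	}
}
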
260 tmp->end = size - 1; in __release_child_resources()
323 * [@start..@end].
326 * of the resource that's within [@start..@end]; if none is found, returns
330 * @end: end address of same resource
335 * The caller must specify @start, @end, @flags, and @desc
338 static int find_next_iomem_res(resource_size_t start, resource_size_t end, in find_next_iomem_res() argument
347 if (start >= end) in find_next_iomem_res()
354 if (p->start > end) { in find_next_iomem_res()
360 if (p->end < start) in find_next_iomem_res()
376 .end = min(end, p->end), in find_next_iomem_res()
387 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, in __walk_iomem_res_desc() argument
395 while (start < end && in __walk_iomem_res_desc()
396 !find_next_iomem_res(start, end, flags, desc, &res)) { in __walk_iomem_res_desc()
401 start = res.end + 1; in __walk_iomem_res_desc()
414 * @end: end addr
418 * All the memory ranges which overlap start,end and also match flags and
425 u64 end, void *arg, int (*func)(struct resource *, void *)) in walk_iomem_res_desc() argument
427 return __walk_iomem_res_desc(start, end, flags, desc, arg, func); in walk_iomem_res_desc()
438 int walk_system_ram_res(u64 start, u64 end, void *arg, in walk_system_ram_res() argument
443 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, in walk_system_ram_res()
451 int walk_mem_res(u64 start, u64 end, void *arg, in walk_mem_res() argument
456 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, in walk_mem_res()
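
Lines 323-456 all belong to find_next_iomem_res() and the __walk_iomem_res_desc() loop that drives the exported walkers (walk_iomem_res_desc(), walk_system_ram_res(), walk_mem_res()): each iteration hands the callback a resource clipped to [start..end] (line 376) and resumes at res.end + 1 (line 401). A usage sketch of the callback style these wrappers expect; the function names and the byte-counting logic are made up for illustration:

/* Hypothetical callback: accumulate how much System RAM the walker reports. */
static int count_ram_bytes(struct resource *res, void *arg)
{
	u64 *total = arg;

	*total += resource_size(res);	/* res is already clipped to the query range */
	return 0;			/* returning non-zero stops the walk early */
}

static u64 system_ram_bytes_in(u64 start, u64 end)
{
	u64 total = 0;

	/* visits every busy IORESOURCE_SYSTEM_RAM range overlapping [start..end] */
	walk_system_ram_res(start, end, &total, count_ram_bytes);
	return total;
}
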
468 resource_size_t start, end; in walk_system_ram_range() local
475 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; in walk_system_ram_range()
477 while (start < end && in walk_system_ram_range()
478 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) { in walk_system_ram_range()
480 end_pfn = PFN_DOWN(res.end + 1); in walk_system_ram_range()
485 start = res.end + 1; in walk_system_ram_range()
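
walk_system_ram_range() (lines 468-485) performs the same walk but converts to and from page-frame numbers: line 475 turns the pfn window into a byte range, and line 480 turns each reported resource back into pfns for the callback. The callback therefore has a different shape; an illustrative sketch with a hypothetical function name:

static int touch_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	/* operate on pages [start_pfn, start_pfn + nr_pages) */
	return 0;
}

A call then looks like walk_system_ram_range(first_pfn, nr_pages, NULL, touch_ram_pages), with first_pfn and nr_pages describing the pfn window to scan.
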
513 res.end = start + size - 1; in __region_intersects()
583 if (res->end > max) in resource_clip()
584 res->end = max; in resource_clip()
602 * of this->start - 1 to tmp->end below would cause an underflow. in __find_resource()
605 tmp.start = (this == old) ? old->start : this->end + 1; in __find_resource()
610 tmp.end = (this == old) ? this->end : this->start - 1; in __find_resource()
612 tmp.end = root->end; in __find_resource()
614 if (tmp.end < tmp.start) in __find_resource()
622 avail.end = tmp.end; in __find_resource()
628 alloc.end = alloc.start + size - 1; in __find_resource()
629 if (alloc.start <= alloc.end && in __find_resource()
632 new->end = alloc.end; in __find_resource()
637 next: if (!this || this->end == root->end) in __find_resource()
641 tmp.start = this->end + 1; in __find_resource()
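
Lines 583-641 come from resource_clip() and __find_resource(), which hunt for a free gap among a parent's children. The core idea behind the matched lines is the candidate window between two neighbouring children; an illustration only, since the real code also handles reallocation of an existing entry (line 605) and alignment constraints:

/* Illustration: the window __find_resource() considers between two
 * neighbouring children of `root` (simplified). */
static void candidate_gap(struct resource *root, struct resource *prev,
			  struct resource *next, struct resource *gap)
{
	gap->start = prev ? prev->end + 1 : root->start;
	gap->end   = next ? next->start - 1 : root->end;	/* cf. lines 610-612 */
	/* an empty window shows up as gap->end < gap->start (line 614) */
}

resource_clip() (lines 583-584) then trims that window to the caller's [min..max] constraint before the allocation is placed inside it with end = start + size - 1 (line 628) and accepted on line 632.
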
682 old->end = new.end; in reallocate_resource()
693 old->end = new.end; in reallocate_resource()
794 if ((first->start > new->start) || (first->end < new->end)) in __insert_resource()
796 if ((first->start == new->start) && (first->end == new->end)) in __insert_resource()
802 if (next->start < new->start || next->end > new->end) in __insert_resource()
806 if (next->sibling->start > new->end) in __insert_resource()
900 if (conflict->end > new->end) in insert_resource_expand_to_fit()
901 new->end = conflict->end; in insert_resource_expand_to_fit()
938 resource_size_t end = start + size - 1; in __adjust_resource() local
944 if ((start < parent->start) || (end > parent->end)) in __adjust_resource()
947 if (res->sibling && (res->sibling->start <= end)) in __adjust_resource()
954 if (start <= tmp->end) in __adjust_resource()
960 if ((tmp->start < start) || (tmp->end > end)) in __adjust_resource()
964 res->end = end; in __adjust_resource()
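
Lines 938-964 are the validation inside __adjust_resource(): the new [start..end] must stay within the parent (line 944), must not run into the next sibling (line 947), and must still cover every existing child (line 960). The exported wrapper is adjust_resource(res, start, size); a small usage sketch, with an invented helper name:

/* Grow an already-registered region in place, keeping its start address. */
static int grow_region(struct resource *res, resource_size_t new_size)
{
	/* new end becomes res->start + new_size - 1, subject to the checks above */
	return adjust_resource(res, res->start, new_size);
}
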
995 resource_size_t end, const char *name) in __reserve_region_with_split() argument
1008 res->end = end; in __reserve_region_with_split()
1025 conflict->end >= res->end) { in __reserve_region_with_split()
1033 end = res->end; in __reserve_region_with_split()
1034 res->end = conflict->start - 1; in __reserve_region_with_split()
1035 if (conflict->end < end) { in __reserve_region_with_split()
1042 next_res->start = conflict->end + 1; in __reserve_region_with_split()
1043 next_res->end = end; in __reserve_region_with_split()
1048 res->start = conflict->end + 1; in __reserve_region_with_split()
1056 resource_size_t end, const char *name) in reserve_region_with_split() argument
1061 if (root->start > start || root->end < end) { in reserve_region_with_split()
1063 (unsigned long long)start, (unsigned long long)end, in reserve_region_with_split()
1065 if (start > root->end || end < root->start) in reserve_region_with_split()
1068 if (end > root->end) in reserve_region_with_split()
1069 end = root->end; in reserve_region_with_split()
1074 (unsigned long long)end); in reserve_region_with_split()
1079 __reserve_region_with_split(root, start, end, name); in reserve_region_with_split()
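
Lines 995-1079 implement reserve_region_with_split(): reserve [start..end] under a root and, wherever part of that range is already claimed, split the reservation around each conflict (lines 1034-1043) instead of failing outright; lines 1061-1069 also clamp a request that pokes outside the root. A call-site sketch, with a purely illustrative address range and name:

static void __init reserve_firmware_window(void)
{
	/* Already-claimed holes inside the range are skipped and the
	 * remainder is reserved piecewise. */
	reserve_region_with_split(&iomem_resource, 0xfed00000, 0xfed0ffff,
				  "firmware");
}
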
1138 devmem_is_allowed(PHYS_PFN(res->end))) { in revoke_iomem()
1171 res->end = start + n - 1; in __request_region_locked()
1260 resource_size_t end; in __release_region() local
1263 end = start + n - 1; in __release_region()
1272 if (res->start <= start && res->end >= end) { in __release_region()
1277 if (res->start != start || res->end != end) in __release_region()
1293 (unsigned long long)end); in __release_region()
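
Lines 1171-1293 come from __request_region_locked() and __release_region(), the machinery behind the request_region()/release_region() pair (and their request_mem_region()/release_mem_region() counterparts). Line 1272 finds a resource covering [start..end], and line 1277 then insists on an exact fit, which is why a release must mirror the original request. The canonical driver-side pattern, with a hypothetical port base and length:

#include <linux/ioport.h>

#define DRV_BASE 0x220		/* hypothetical I/O port base */
#define DRV_LEN  16

static int drv_claim_ports(void)
{
	if (!request_region(DRV_BASE, DRV_LEN, "exampledrv"))
		return -EBUSY;	/* range already owned by someone else */

	/* ... program the hardware ... */

	release_region(DRV_BASE, DRV_LEN);	/* must match the request exactly */
	return 0;
}
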
1324 resource_size_t end; in release_mem_region_adjustable() local
1326 end = start + size - 1; in release_mem_region_adjustable()
1327 if (WARN_ON_ONCE((start < parent->start) || (end > parent->end))) in release_mem_region_adjustable()
1344 if (res->start >= end) in release_mem_region_adjustable()
1348 if (res->start > start || res->end < end) { in release_mem_region_adjustable()
1376 if (res->start == start && res->end == end) { in release_mem_region_adjustable()
1380 } else if (res->start == start && res->end != end) { in release_mem_region_adjustable()
1382 WARN_ON_ONCE(__adjust_resource(res, end + 1, in release_mem_region_adjustable()
1383 res->end - end)); in release_mem_region_adjustable()
1384 } else if (res->start != start && res->end == end) { in release_mem_region_adjustable()
1385 /* adjust the end */ in release_mem_region_adjustable()
1399 new_res->start = end + 1; in release_mem_region_adjustable()
1400 new_res->end = res->end; in release_mem_region_adjustable()
1427 return r1->flags == r2->flags && r1->end + 1 == r2->start && in system_ram_resources_mergeable()
1464 res->end = cur->end; in merge_system_ram_resource()
1474 cur->end = res->end; in merge_system_ram_resource()
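
Line 1427 states the first part of the merge condition used when onlining memory: two System RAM resources can be folded together only if their flags match and they are strictly adjacent (r1->end + 1 == r2->start; the trailing && shows the condition continues on the next source line, which is not matched here). For example, [0x100000000..0x17fffffff] and [0x180000000..0x1ffffffff] pass the adjacency test, since 0x17fffffff + 1 == 0x180000000, so, assuming the remaining conditions also hold, lines 1464-1474 simply extend one resource's ->end to absorb the other, yielding [0x100000000..0x1ffffffff].
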
1647 res->end = io_start + io_num - 1; in reserve_setup()
1677 if (p->end < addr) in iomem_map_sanity_check()
1680 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) in iomem_map_sanity_check()
1733 if (p->end < addr) in iomem_is_exclusive()
1781 resource_size_t end, addr; in __request_free_mem_region() local
1786 end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1); in __request_free_mem_region()
1787 addr = end - size + 1UL; in __request_free_mem_region()
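
Lines 1781-1787 set up __request_free_mem_region(): the search ceiling is min(base->end, 2^MAX_PHYSMEM_BITS - 1) and the first candidate starts at end - size + 1, i.e. the scan runs top-down through unused physical address space. The exported wrappers built on it, request_free_mem_region() and devm_request_free_mem_region(), are what ZONE_DEVICE users call to carve out such a range; a hedged usage sketch (the function name and region name string are illustrative, and the ERR_PTR-style error return is my assumption about the interface):

static int claim_device_private_range(unsigned long size)
{
	struct resource *res;

	/* Search downward from the top of iomem_resource for `size` bytes of
	 * physical address space that nothing else claims. */
	res = request_free_mem_region(&iomem_resource, size, "device-private");
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* res->start..res->end is now reserved for the caller */
	return 0;
}
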