
Searched refs:zone (results 1 – 25 of 243), sorted by relevance


/Linux-v5.4/drivers/gpu/drm/ttm/
ttm_memory.c
81 struct ttm_mem_zone *zone = in ttm_mem_zone_kobj_release() local
85 zone->name, (unsigned long long)zone->used_mem >> 10); in ttm_mem_zone_kobj_release()
86 kfree(zone); in ttm_mem_zone_kobj_release()
93 struct ttm_mem_zone *zone = in ttm_mem_zone_show() local
97 spin_lock(&zone->glob->lock); in ttm_mem_zone_show()
99 val = zone->zone_mem; in ttm_mem_zone_show()
101 val = zone->emer_mem; in ttm_mem_zone_show()
103 val = zone->max_mem; in ttm_mem_zone_show()
105 val = zone->swap_limit; in ttm_mem_zone_show()
107 val = zone->used_mem; in ttm_mem_zone_show()
[all …]
/Linux-v5.4/drivers/block/
null_blk_zoned.c
39 struct blk_zone *zone = &dev->zones[i]; in null_zone_init() local
41 zone->start = sector; in null_zone_init()
42 zone->len = dev->zone_size_sects; in null_zone_init()
43 zone->wp = zone->start + zone->len; in null_zone_init()
44 zone->type = BLK_ZONE_TYPE_CONVENTIONAL; in null_zone_init()
45 zone->cond = BLK_ZONE_COND_NOT_WP; in null_zone_init()
51 struct blk_zone *zone = &dev->zones[i]; in null_zone_init() local
53 zone->start = zone->wp = sector; in null_zone_init()
54 zone->len = dev->zone_size_sects; in null_zone_init()
55 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; in null_zone_init()
[all …]
/Linux-v5.4/include/linux/
memory_hotplug.h
11 struct zone;
73 static inline unsigned zone_span_seqbegin(struct zone *zone) in zone_span_seqbegin() argument
75 return read_seqbegin(&zone->span_seqlock); in zone_span_seqbegin()
77 static inline int zone_span_seqretry(struct zone *zone, unsigned iv) in zone_span_seqretry() argument
79 return read_seqretry(&zone->span_seqlock, iv); in zone_span_seqretry()
81 static inline void zone_span_writelock(struct zone *zone) in zone_span_writelock() argument
83 write_seqlock(&zone->span_seqlock); in zone_span_writelock()
85 static inline void zone_span_writeunlock(struct zone *zone) in zone_span_writeunlock() argument
87 write_sequnlock(&zone->span_seqlock); in zone_span_writeunlock()
89 static inline void zone_seqlock_init(struct zone *zone) in zone_seqlock_init() argument
[all …]
vmstat.h
136 static inline void zone_numa_state_add(long x, struct zone *zone, in zone_numa_state_add() argument
139 atomic_long_add(x, &zone->vm_numa_stat[item]); in zone_numa_state_add()
150 static inline unsigned long zone_numa_state_snapshot(struct zone *zone, in zone_numa_state_snapshot() argument
153 long x = atomic_long_read(&zone->vm_numa_stat[item]); in zone_numa_state_snapshot()
157 x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; in zone_numa_state_snapshot()
163 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
166 atomic_long_add(x, &zone->vm_stat[item]); in zone_page_state_add()
197 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
200 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state()
214 static inline unsigned long zone_page_state_snapshot(struct zone *zone, in zone_page_state_snapshot() argument
[all …]
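
The vmstat.h hits above show two read paths for a zone counter: zone_page_state() reads only the zone-wide atomic, while zone_page_state_snapshot() also folds in each CPU's not-yet-flushed delta. The following is a minimal userspace sketch of that pattern, not the kernel code: the struct is invented for illustration, atomics and real per-CPU storage are replaced by plain longs, and the CPU count is fixed.

#include <stdio.h>

#define NR_CPUS 4

/* Toy stand-in: one zone-wide counter plus per-CPU deltas awaiting a flush. */
struct zone {
	long vm_stat;			/* zone-wide counter */
	long vm_stat_diff[NR_CPUS];	/* per-CPU deltas not yet folded in */
};

/* Cheap read: global counter only; may lag by whatever the CPUs still hold. */
static unsigned long zone_page_state(const struct zone *zone)
{
	long x = zone->vm_stat;

	return x < 0 ? 0 : (unsigned long)x;
}

/* Accurate read: add every CPU's pending delta before reporting. */
static unsigned long zone_page_state_snapshot(const struct zone *zone)
{
	long x = zone->vm_stat;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		x += zone->vm_stat_diff[cpu];

	return x < 0 ? 0 : (unsigned long)x;
}

int main(void)
{
	struct zone z = { .vm_stat = 100, .vm_stat_diff = { 3, -1, 0, 5 } };

	printf("fast read:     %lu\n", zone_page_state(&z));          /* 100 */
	printf("snapshot read: %lu\n", zone_page_state_snapshot(&z)); /* 107 */
	return 0;
}
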
mmzone.h
418 struct zone { struct
587 static inline unsigned long zone_managed_pages(struct zone *zone) in zone_managed_pages() argument
589 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
592 static inline unsigned long zone_end_pfn(const struct zone *zone) in zone_end_pfn() argument
594 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
597 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) in zone_spans_pfn() argument
599 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
602 static inline bool zone_is_initialized(struct zone *zone) in zone_is_initialized() argument
604 return zone->initialized; in zone_is_initialized()
607 static inline bool zone_is_empty(struct zone *zone) in zone_is_empty() argument
[all …]
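
The mmzone.h helpers above treat a zone's page-frame range as the half-open interval [zone_start_pfn, zone_start_pfn + spanned_pages). A small standalone sketch of that arithmetic follows; the struct zone here is a toy with only the two fields the excerpt uses, not the real kernel definition.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's struct zone: only the span fields. */
struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

/* End PFN is exclusive: the first PFN past the zone. */
static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* A PFN belongs to the zone iff it falls inside the half-open span. */
static bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

	printf("end pfn: %#lx\n", zone_end_pfn(&z));                /* 0x1800 */
	printf("0x17ff in zone: %d\n", zone_spans_pfn(&z, 0x17ff)); /* 1 */
	printf("0x1800 in zone: %d\n", zone_spans_pfn(&z, 0x1800)); /* 0 */
	return 0;
}
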
compaction.h
93 extern int fragmentation_index(struct zone *zone, unsigned int order);
99 extern enum compact_result compaction_suitable(struct zone *zone, int order,
102 extern void defer_compaction(struct zone *zone, int order);
103 extern bool compaction_deferred(struct zone *zone, int order);
104 extern void compaction_defer_reset(struct zone *zone, int order,
106 extern bool compaction_restarting(struct zone *zone, int order);
192 static inline enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
198 static inline void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
202 static inline bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
page-isolation.h
6 static inline bool has_isolate_pageblock(struct zone *zone) in has_isolate_pageblock() argument
8 return zone->nr_isolate_pageblock; in has_isolate_pageblock()
19 static inline bool has_isolate_pageblock(struct zone *zone) in has_isolate_pageblock() argument
36 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
39 int move_freepages_block(struct zone *zone, struct page *page,
/Linux-v5.4/mm/
vmstat.c
40 static void zero_zone_numa_counters(struct zone *zone) in zero_zone_numa_counters() argument
45 atomic_long_set(&zone->vm_numa_stat[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item] in zero_zone_numa_counters()
55 struct zone *zone; in zero_zones_numa_counters() local
57 for_each_populated_zone(zone) in zero_zones_numa_counters()
58 zero_zone_numa_counters(zone); in zero_zones_numa_counters()
172 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
185 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
196 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold() argument
231 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
[all …]
memory_hotplug.c
329 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
340 if (zone && zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
350 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
365 if (zone && zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
374 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
377 unsigned long zone_start_pfn = zone->zone_start_pfn; in shrink_zone_span()
378 unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ in shrink_zone_span()
381 int nid = zone_to_nid(zone); in shrink_zone_span()
383 zone_span_writelock(zone); in shrink_zone_span()
391 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
[all …]
compaction.c
142 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
144 zone->compact_considered = 0; in defer_compaction()
145 zone->compact_defer_shift++; in defer_compaction()
147 if (order < zone->compact_order_failed) in defer_compaction()
148 zone->compact_order_failed = order; in defer_compaction()
150 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
151 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
153 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
157 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
159 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
[all …]
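
The compaction.c hits show the defer/backoff pair: defer_compaction() resets the attempt counter and widens the window exponentially (capped by COMPACT_MAX_DEFER_SHIFT), and compaction_deferred() reports whether a request of the given order should still be skipped. Below is a simplified standalone sketch of that logic, assuming only the three counters shown matter; the tracepoints and the overflow clamp in the real function are omitted.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* cap: skip at most 1 << 6 attempts */

/* Toy stand-in for struct zone: only the compaction-defer bookkeeping. */
struct zone {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Called after compaction fails: widen the backoff window exponentially. */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Called before trying compaction: true while still inside the backoff window. */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;	/* smaller orders than the failing one may proceed */

	if (++zone->compact_considered >= defer_limit)
		return false;	/* window exhausted, let compaction run again */

	return true;
}

int main(void)
{
	struct zone z = { .compact_order_failed = 10 };
	int i, skipped = 0;

	defer_compaction(&z, 9);	/* an order-9 compaction just failed */
	for (i = 0; i < 10; i++)
		skipped += compaction_deferred(&z, 9);

	printf("attempts skipped: %d of 10\n", skipped); /* prints 1 */
	return 0;
}
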
page_alloc.c
103 struct zone *zone; member
561 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
569 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
570 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
571 sp = zone->spanned_pages; in page_outside_zone_boundaries()
572 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
574 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
578 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
584 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
588 if (zone != page_zone(page)) in page_is_consistent()
[all …]
page_isolation.c
20 struct zone *zone; in set_migratetype_isolate() local
26 zone = page_zone(page); in set_migratetype_isolate()
28 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
62 if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, in set_migratetype_isolate()
77 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
78 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
81 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
84 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
86 drain_all_pages(zone); in set_migratetype_isolate()
92 struct zone *zone; in unset_migratetype_isolate() local
[all …]
mmzone.c
30 struct zone *next_zone(struct zone *zone) in next_zone() argument
32 pg_data_t *pgdat = zone->zone_pgdat; in next_zone()
34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
35 zone++; in next_zone()
39 zone = pgdat->node_zones; in next_zone()
41 zone = NULL; in next_zone()
43 return zone; in next_zone()
69 (z->zone && !zref_in_nodemask(z, nodes))) in __next_zones_zonelist()
77 struct page *page, struct zone *zone) in memmap_valid_within() argument
82 if (page_zone(page) != zone) in memmap_valid_within()
/Linux-v5.4/include/net/netfilter/
nf_conntrack_zones.h
12 return &ct->zone; in nf_ct_zone()
19 nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags) in nf_ct_zone_init() argument
21 zone->id = id; in nf_ct_zone_init()
22 zone->flags = flags; in nf_ct_zone_init()
23 zone->dir = dir; in nf_ct_zone_init()
25 return zone; in nf_ct_zone_init()
36 if (tmpl->zone.flags & NF_CT_FLAG_MARK) in nf_ct_zone_tmpl()
37 return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0); in nf_ct_zone_tmpl()
43 const struct nf_conntrack_zone *zone) in nf_ct_zone_add() argument
46 ct->zone = *zone; in nf_ct_zone_add()
[all …]
/Linux-v5.4/fs/btrfs/
reada.c
229 struct reada_zone *zone; in reada_find_zone() local
235 zone = NULL; in reada_find_zone()
237 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, in reada_find_zone()
239 if (ret == 1 && logical >= zone->start && logical <= zone->end) { in reada_find_zone()
240 kref_get(&zone->refcnt); in reada_find_zone()
242 return zone; in reada_find_zone()
255 zone = kzalloc(sizeof(*zone), GFP_KERNEL); in reada_find_zone()
256 if (!zone) in reada_find_zone()
261 kfree(zone); in reada_find_zone()
265 zone->start = start; in reada_find_zone()
[all …]
/Linux-v5.4/drivers/md/
dm-zoned-metadata.c
189 unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_id() argument
191 return ((unsigned int)(zone - zmd->zones)); in dmz_id()
194 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_sect() argument
196 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift; in dmz_start_sect()
199 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_block() argument
201 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift; in dmz_start_block()
1083 static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_init_zone() argument
1095 INIT_LIST_HEAD(&zone->link); in dmz_init_zone()
1096 atomic_set(&zone->refcount, 0); in dmz_init_zone()
1097 zone->chunk = DMZ_MAP_UNMAPPED; in dmz_init_zone()
[all …]
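
dmz_id() in the dm-zoned-metadata.c excerpt derives a zone's index from pointer arithmetic against the base of the zones array, and dmz_start_sect()/dmz_start_block() are just that index shifted by the per-zone size. The sketch below illustrates that addressing scheme; the metadata struct and the shift values are invented for the example, and the zmd->dev indirection from the real code is flattened away.

#include <stdio.h>

typedef unsigned long long sector_t;

struct dm_zone { int dummy; };

/* Toy metadata: an array of zones plus the per-zone size expressed as shifts. */
struct dmz_metadata {
	struct dm_zone zones[8];
	unsigned int zone_nr_sectors_shift;	/* illustrative value below */
	unsigned int zone_nr_blocks_shift;	/* illustrative value below */
};

/* Zone index = offset of the zone within the zones array. */
static unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (unsigned int)(zone - zmd->zones);
}

/* First sector / first block of a zone: index scaled by the zone size. */
static sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->zone_nr_sectors_shift;
}

static sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->zone_nr_blocks_shift;
}

int main(void)
{
	struct dmz_metadata zmd = {
		.zone_nr_sectors_shift = 19,
		.zone_nr_blocks_shift = 16,
	};
	struct dm_zone *zone = &zmd.zones[3];

	printf("id %u, start sector %llu, start block %llu\n",
	       dmz_id(&zmd, zone),
	       dmz_start_sect(&zmd, zone),
	       dmz_start_block(&zmd, zone));
	return 0;
}
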
dm-zoned-target.c
21 struct dm_zone *zone; member
85 struct dm_zone *zone = bioctx->zone; in dmz_bio_endio() local
87 if (zone) { in dmz_bio_endio()
90 dmz_is_seq(zone)) in dmz_bio_endio()
91 set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); in dmz_bio_endio()
92 dmz_deactivate_zone(zone); in dmz_bio_endio()
115 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, in dmz_submit_bio() argument
128 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); in dmz_submit_bio()
138 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) in dmz_submit_bio()
139 zone->wp_block += nr_blocks; in dmz_submit_bio()
[all …]
dm-zoned.h
178 unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
179 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
180 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
187 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
189 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
191 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
198 static inline void dmz_activate_zone(struct dm_zone *zone) in dmz_activate_zone() argument
200 atomic_inc(&zone->refcount); in dmz_activate_zone()
207 static inline void dmz_deactivate_zone(struct dm_zone *zone) in dmz_deactivate_zone() argument
209 atomic_dec(&zone->refcount); in dmz_deactivate_zone()
[all …]
/Linux-v5.4/kernel/power/
snapshot.c
371 struct mem_zone_bm_rtree *zone; member
428 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, in add_rtree_block() argument
435 block_nr = zone->blocks; in add_rtree_block()
445 for (i = zone->levels; i < levels_needed; i++) { in add_rtree_block()
447 &zone->nodes); in add_rtree_block()
451 node->data[0] = (unsigned long)zone->rtree; in add_rtree_block()
452 zone->rtree = node; in add_rtree_block()
453 zone->levels += 1; in add_rtree_block()
457 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves); in add_rtree_block()
462 node = zone->rtree; in add_rtree_block()
[all …]
/Linux-v5.4/drivers/thermal/tegra/
tegra-bpmp-thermal.c
35 struct tegra_bpmp_thermal_zone *zone = data; in tegra_bpmp_thermal_get_temp() local
43 req.get_temp.zone = zone->idx; in tegra_bpmp_thermal_get_temp()
52 err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); in tegra_bpmp_thermal_get_temp()
63 struct tegra_bpmp_thermal_zone *zone = data; in tegra_bpmp_thermal_set_trips() local
69 req.set_trip.zone = zone->idx; in tegra_bpmp_thermal_set_trips()
79 return tegra_bpmp_transfer(zone->tegra->bpmp, &msg); in tegra_bpmp_thermal_set_trips()
84 struct tegra_bpmp_thermal_zone *zone; in tz_device_update_work_fn() local
86 zone = container_of(work, struct tegra_bpmp_thermal_zone, in tz_device_update_work_fn()
89 thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED); in tz_device_update_work_fn()
109 if (tegra->zones[i]->idx != req->host_trip_reached.zone) in bpmp_mrq_thermal()
[all …]
/Linux-v5.4/virt/kvm/
coalesced_mmio.c
36 if (addr < dev->zone.addr) in coalesced_mmio_in_range()
38 if (addr + len > dev->zone.addr + dev->zone.size) in coalesced_mmio_in_range()
89 ring->coalesced_mmio[insert].pio = dev->zone.pio; in coalesced_mmio_write()
142 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_register_coalesced_mmio() argument
147 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_register_coalesced_mmio()
157 dev->zone = *zone; in kvm_vm_ioctl_register_coalesced_mmio()
161 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, in kvm_vm_ioctl_register_coalesced_mmio()
162 zone->addr, zone->size, &dev->dev); in kvm_vm_ioctl_register_coalesced_mmio()
178 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_unregister_coalesced_mmio() argument
182 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_unregister_coalesced_mmio()
[all …]
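
coalesced_mmio_in_range() in the KVM excerpt accepts an access only when the whole [addr, addr + len) range lies inside the registered zone. A tiny standalone version of that containment check, with the zone reduced to addr/size (the pio flag and bus plumbing are left out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy zone: a guest-physical range registered for write coalescing. */
struct mmio_zone {
	uint64_t addr;
	uint64_t size;
};

/* True only if the whole access [addr, addr + len) lies inside the zone. */
static bool coalesced_mmio_in_range(const struct mmio_zone *zone,
				    uint64_t addr, uint64_t len)
{
	if (addr < zone->addr)
		return false;
	if (addr + len > zone->addr + zone->size)
		return false;
	return true;
}

int main(void)
{
	struct mmio_zone zone = { .addr = 0xfed00000, .size = 0x1000 };

	printf("%d\n", coalesced_mmio_in_range(&zone, 0xfed00ff8, 8)); /* 1 */
	printf("%d\n", coalesced_mmio_in_range(&zone, 0xfed00ffc, 8)); /* 0 */
	return 0;
}
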
/Linux-v5.4/include/trace/events/
compaction.h
199 TP_PROTO(struct zone *zone,
203 TP_ARGS(zone, order, ret),
213 __entry->nid = zone_to_nid(zone);
214 __entry->idx = zone_idx(zone);
228 TP_PROTO(struct zone *zone,
232 TP_ARGS(zone, order, ret)
237 TP_PROTO(struct zone *zone,
241 TP_ARGS(zone, order, ret)
246 TP_PROTO(struct zone *zone, int order),
248 TP_ARGS(zone, order),
[all …]
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
alloc.c
250 struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); in mlx4_zone_add_one() local
252 if (NULL == zone) in mlx4_zone_add_one()
255 zone->flags = flags; in mlx4_zone_add_one()
256 zone->bitmap = bitmap; in mlx4_zone_add_one()
257 zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; in mlx4_zone_add_one()
258 zone->priority = priority; in mlx4_zone_add_one()
259 zone->offset = offset; in mlx4_zone_add_one()
263 zone->uid = zone_alloc->last_uid++; in mlx4_zone_add_one()
264 zone->allocator = zone_alloc; in mlx4_zone_add_one()
274 list_add_tail(&zone->prio_list, &it->prio_list); in mlx4_zone_add_one()
[all …]
/Linux-v5.4/drivers/mtd/
sm_ftl.c
192 static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset) in sm_mkoffset() argument
195 WARN_ON(zone < 0 || zone >= ftl->zone_count); in sm_mkoffset()
202 return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset; in sm_mkoffset()
207 int *zone, int *block, int *boffset) in sm_break_offset() argument
212 *zone = offset >= ftl->zone_count ? -1 : offset; in sm_break_offset()
239 int zone, int block, int boffset, in sm_read_sector() argument
269 if (zone == 0 && block == ftl->cis_block && boffset == in sm_read_sector()
280 ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); in sm_read_sector()
285 block, zone, ret); in sm_read_sector()
303 " as bad" , block, zone); in sm_read_sector()
[all …]
/Linux-v5.4/drivers/thermal/intel/int340x_thermal/
int340x_thermal_zone.c
13 static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, in int340x_thermal_get_zone_temp() argument
16 struct int34x_thermal_zone *d = zone->devdata; in int340x_thermal_get_zone_temp()
21 return d->override_ops->get_temp(zone, temp); in int340x_thermal_get_zone_temp()
42 static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, in int340x_thermal_get_trip_temp() argument
45 struct int34x_thermal_zone *d = zone->devdata; in int340x_thermal_get_trip_temp()
49 return d->override_ops->get_trip_temp(zone, trip, temp); in int340x_thermal_get_trip_temp()
74 static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, in int340x_thermal_get_trip_type() argument
78 struct int34x_thermal_zone *d = zone->devdata; in int340x_thermal_get_trip_type()
82 return d->override_ops->get_trip_type(zone, trip, type); in int340x_thermal_get_trip_type()
107 static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, in int340x_thermal_set_trip_temp() argument
[all …]
