Lines Matching +full:non +full:- +full:descriptive (matched lines from kernel/sched/topology.c)

1 // SPDX-License-Identifier: GPL-2.0
31 struct sched_group *group = sd->groups; in sched_domain_debug_one()
35 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); in sched_domain_debug_one()
37 if (!(sd->flags & SD_LOAD_BALANCE)) { in sched_domain_debug_one()
38 printk("does not load-balance\n"); in sched_domain_debug_one()
39 if (sd->parent) in sched_domain_debug_one()
41 return -1; in sched_domain_debug_one()
45 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
68 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
78 group->sgc->id, in sched_domain_debug_one()
81 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
87 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
88 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
90 if (group == sd->groups && sd->child && in sched_domain_debug_one()
91 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
93 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); in sched_domain_debug_one()
98 group = group->next; in sched_domain_debug_one()
100 if (group != sd->groups) in sched_domain_debug_one()
103 } while (group != sd->groups); in sched_domain_debug_one()
107 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); in sched_domain_debug_one()
109 if (sd->parent && in sched_domain_debug_one()
110 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
111 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); in sched_domain_debug_one()
123 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
127 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
133 sd = sd->parent; in sched_domain_debug()
154 if (sd->flags & (SD_LOAD_BALANCE | in sd_degenerate()
162 if (sd->groups != sd->groups->next) in sd_degenerate()
167 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
176 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
185 if (parent->groups == parent->groups->next) { in sd_parent_degenerate()
217 return -EPERM; in sched_energy_aware_handler()
240 tmp = pd->next; in free_pd()
251 pd = pd->next; in find_pd()
271 pd->em_pd = obj; in pd_init()
288 em_pd_nr_cap_states(pd->em_pd)); in perf_domain_debug()
289 pd = pd->next; in perf_domain_debug()
328 * - nr_pd: the number of performance domains
329 * - nr_cpus: the number of CPUs
330 * - nr_cs: the sum of the number of capacity states of all performance
334 * It is generally not a good idea to use such a model in the wake-up path on
337 * with per-CPU DVFS and less than 8 capacity states each, for example.
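
A quick worked check makes this comment fragment concrete: the three quantities feed a single complexity product, usually written C = nr_pd * (nr_cpus + nr_cs), that the perf-domain build code compares against a fixed cap before enabling Energy Aware Scheduling. A minimal sketch; the cap value and the helper name are assumptions, not quotes from the matched lines:

/*
 * Sketch only: the EAS complexity check. The threshold and the
 * function name below are illustrative assumptions.
 */
#define EXAMPLE_EM_MAX_COMPLEXITY	2048

static bool example_em_model_is_cheap(int nr_pd, int nr_cpus, int nr_cs)
{
	return nr_pd * (nr_cpus + nr_cs) <= EXAMPLE_EM_MAX_COMPLEXITY;
}

For the example mentioned in the comment, 8 CPUs with per-CPU performance domains and (say) 8 capacity states each gives nr_pd = 8, nr_cpus = 8, nr_cs = 8 * 8 = 64, so C = 8 * (8 + 64) = 576, comfortably below such a cap.
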
347 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
372 gov = policy->governor; in build_perf_domains()
375 if (rd->pd) in build_perf_domains()
385 tmp->next = pd; in build_perf_domains()
393 nr_cs += em_pd_nr_cap_states(pd->em_pd); in build_perf_domains()
406 tmp = rd->pd; in build_perf_domains()
407 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
409 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
415 tmp = rd->pd; in build_perf_domains()
416 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
418 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
430 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
431 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
432 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
433 free_cpumask_var(rd->rto_mask); in free_rootdomain()
434 free_cpumask_var(rd->online); in free_rootdomain()
435 free_cpumask_var(rd->span); in free_rootdomain()
436 free_pd(rd->pd); in free_rootdomain()
445 raw_spin_lock_irqsave(&rq->lock, flags); in rq_attach_root()
447 if (rq->rd) { in rq_attach_root()
448 old_rd = rq->rd; in rq_attach_root()
450 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
453 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
460 if (!atomic_dec_and_test(&old_rd->refcount)) in rq_attach_root()
464 atomic_inc(&rd->refcount); in rq_attach_root()
465 rq->rd = rd; in rq_attach_root()
467 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
468 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
471 raw_spin_unlock_irqrestore(&rq->lock, flags); in rq_attach_root()
474 call_rcu(&old_rd->rcu, free_rootdomain); in rq_attach_root()
479 atomic_inc(&rd->refcount); in sched_get_rd()
484 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
487 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
492 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
494 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
496 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
498 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
502 rd->rto_cpu = -1; in init_rootdomain()
503 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
504 init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); in init_rootdomain()
507 init_dl_bw(&rd->dl_bw); in init_rootdomain()
508 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
511 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
516 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
518 free_cpumask_var(rd->rto_mask); in init_rootdomain()
520 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
522 free_cpumask_var(rd->online); in init_rootdomain()
524 free_cpumask_var(rd->span); in init_rootdomain()
526 return -ENOMEM; in init_rootdomain()
530 * By default the system creates a single root-domain with all CPUs as
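
For context, the fragment above introduces the default root domain; the surrounding (unmatched) lines amount to a single statically allocated instance, initialized once and pinned with an initial reference. A rough reconstruction, not quoted from the matched lines:

/* Reconstruction: one static root_domain shared by all CPUs until domains are built. */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);
	atomic_set(&def_root_domain.refcount, 1);
}
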
567 tmp = sg->next; in free_sched_groups()
569 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
570 kfree(sg->sgc); in free_sched_groups()
572 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
585 free_sched_groups(sd->groups, 1); in destroy_sched_domain()
587 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) in destroy_sched_domain()
588 kfree(sd->shared); in destroy_sched_domain()
597 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
606 call_rcu(&sd->rcu, destroy_sched_domains_rcu); in destroy_sched_domains()
638 sds = sd->shared; in update_top_cache_domain()
668 struct sched_domain *parent = tmp->parent; in cpu_attach_domain()
673 tmp->parent = parent->parent; in cpu_attach_domain()
674 if (parent->parent) in cpu_attach_domain()
675 parent->parent->child = tmp; in cpu_attach_domain()
681 if (parent->flags & SD_PREFER_SIBLING) in cpu_attach_domain()
682 tmp->flags |= SD_PREFER_SIBLING; in cpu_attach_domain()
685 tmp = tmp->parent; in cpu_attach_domain()
690 sd = sd->parent; in cpu_attach_domain()
693 sd->child = NULL; in cpu_attach_domain()
699 tmp = rq->sd; in cpu_attach_domain()
700 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
737 * Given a node-distance table, for example:
747 * 0 ----- 1
751 * 3 ----- 2
759 * NUMA-2 0-3 0-3 0-3 0-3
760 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
762 * NUMA-1 0-1,3 0-2 1-3 0,2-3
765 * NUMA-0 0 1 2 3
770 * represented multiple times -- hence the "overlap" naming for this part of
774 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
778 * - the first group of each domain is its child domain; this
779 * gets us the first 0-1,3
780 * - the only uncovered node is 2, whose child domain is 1-3.
783 * more complicated. Consider for instance the groups of Node-1 NUMA-2; both
784 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
785 * end up at those groups (they would end up in group: 0-1,3).
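
To make the example self-contained (the distance table itself did not match the search), the 0-1-2-3 ring sketched above corresponds to a node_distance() table of the following shape, where 10 is the local distance, 20 a direct link and 30 two hops. Treat it as an illustrative reconstruction:

/* Illustrative node-distance table for the ring topology sketched above. */
static const int example_ring_numa_distance[4][4] = {
	/*        0   1   2   3 */
	/* 0 */ { 10, 20, 30, 20 },
	/* 1 */ { 20, 10, 20, 30 },
	/* 2 */ { 30, 20, 10, 20 },
	/* 3 */ { 20, 30, 20, 10 },
};

Reading row 0 against the NUMA-1 line above: node 0 plus its distance-20 neighbours 1 and 3 gives exactly the 0-1,3 span.
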
808 * 0 ----- 1
812 * 2 ----- 3
820 * NUMA-2 0-3 0-3
821 * groups: {0-2},{1-3} {1-3},{0-2}
823 * NUMA-1 0-2 0-3 0-3 1-3
825 * NUMA-0 0 1 2 3
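
Likewise for the second sketch (whose diagonal links are lost in this rendering): a distance table consistent with the NUMA-1 spans listed above, again an illustrative reconstruction rather than a quote, would be:

/* Illustrative node-distance table for the second example; 0-3 is the only two-hop pair. */
static const int example_asym_numa_distance[4][4] = {
	/*        0   1   2   3 */
	/* 0 */ { 10, 20, 20, 30 },
	/* 1 */ { 20, 10, 20, 20 },
	/* 2 */ { 20, 20, 10, 20 },
	/* 3 */ { 30, 20, 20, 10 },
};
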
843 struct sd_data *sdd = sd->private; in build_balance_mask()
850 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
857 if (!sibling->child) in build_balance_mask()
861 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_balance_mask()
872 * XXX: This creates per-node group entries; since the load-balancer will
873 * immediately access remote memory to construct this group's load-balance
889 if (sd->child) in build_group_from_child_sched_domain()
890 cpumask_copy(sg_span, sched_domain_span(sd->child)); in build_group_from_child_sched_domain()
894 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
902 struct sd_data *sdd = sd->private; in init_overlap_sched_group()
909 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
910 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
916 * Initialize sgc->capacity such that even if we mess up the in init_overlap_sched_group()
921 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
922 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
923 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
932 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
944 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
971 last->next = sg; in build_overlap_sched_groups()
973 last->next = first; in build_overlap_sched_groups()
975 sd->groups = first; in build_overlap_sched_groups()
982 return -ENOMEM; in build_overlap_sched_groups()
987 * Package topology (also see the load-balance blurb in fair.c)
992 * - Simultaneous multithreading (SMT)
993 * - Multi-Core Cache (MC)
994 * - Package (DIE)
1000 * sched_domain -> sched_group -> sched_group_capacity
1002 * `-' `-'
1004 * The sched_domains are per-CPU and have a two way link (parent & child) and
1020 * - or -
1022 * DIE 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1023 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1024 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1037 * - The first is the balance_cpu (see should_we_balance() and the
1038 * load-balance blurb in fair.c); for each group we only want 1 CPU to
1041 * - The second is the sched_group_capacity; we want all identical groups
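
Both points in this comment (a single balance_cpu per group, and identical groups sharing one sched_group_capacity) are easiest to see by walking a domain's circular group list. A minimal sketch using in-kernel helpers that appear elsewhere in this listing; the function itself is hypothetical, and sgc->id is only present with CONFIG_SCHED_DEBUG:

/*
 * Hypothetical debug helper: walk the circular ->next list hanging off
 * one sched_domain and print each group's span and shared sgc id.
 */
static void example_walk_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		printk(KERN_DEBUG "group %d spans %*pbl\n", sg->sgc->id,
		       cpumask_pr_args(sched_group_span(sg)));
		sg = sg->next;
	} while (sg != sd->groups);
}
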
1059 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1060 struct sched_domain *child = sd->child; in get_group()
1067 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1068 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1071 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1073 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1087 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1088 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1089 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1096 * covered by the given span, will set each group's ->cpumask correctly,
1097 * and will initialize their ->sgc.
1105 struct sd_data *sdd = sd->private; in build_sched_groups()
1128 last->next = sg; in build_sched_groups()
1131 last->next = first; in build_sched_groups()
1132 sd->groups = first; in build_sched_groups()
1149 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
1154 int cpu, max_cpu = -1; in init_sched_groups_capacity()
1156 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1158 if (!(sd->flags & SD_ASYM_PACKING)) in init_sched_groups_capacity()
1167 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1170 sg = sg->next; in init_sched_groups_capacity()
1171 } while (sg != sd->groups); in init_sched_groups_capacity()
1181 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1184 static int default_relax_domain_level = -1;
1201 if (!attr || attr->relax_domain_level < 0) { in set_domain_attribute()
1207 request = attr->relax_domain_level; in set_domain_attribute()
1208 if (request < sd->level) { in set_domain_attribute()
1210 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1213 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1225 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1226 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1229 free_percpu(d->sd); in __free_domain_allocs()
1246 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1247 if (!d->sd) in __visit_domain_allocation_hell()
1249 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1250 if (!d->rd) in __visit_domain_allocation_hell()
1263 struct sd_data *sdd = sd->private; in claim_allocations()
1265 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1266 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1268 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1269 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1271 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1272 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1274 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1275 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
1293 * These flags are purely descriptive of the topology and do not prescribe
1297 * SD_SHARE_CPUCAPACITY - describes SMT topologies
1298 * SD_SHARE_PKG_RESOURCES - describes shared caches
1299 * SD_NUMA - describes NUMA topologies
1300 * SD_SHARE_POWERDOMAIN - describes shared power domain
1305 * SD_ASYM_PACKING - describes SMT quirks
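
To illustrate how a topology level advertises these descriptive flags, the flags callback that tl->sd_flags points at simply ORs them together; the sketch below is modeled on the stock SMT callback, and the exact combination should be read as an assumption rather than a quote from this file:

/* Sketch: descriptive flags for an SMT-style level; sd_init() derives behaviour from them. */
static inline int example_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
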
1319 struct sd_data *sdd = &tl->data; in sd_init()
1320 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1327 sched_domains_curr_level = tl->numa_level; in sd_init()
1330 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1332 if (tl->sd_flags) in sd_init()
1333 sd_flags = (*tl->sd_flags)(); in sd_init()
1369 .name = tl->name, in sd_init()
1373 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1380 if (sd->flags & SD_ASYM_CPUCAPACITY) { in sd_init()
1386 if (sd->child) in sd_init()
1387 sd->child->flags &= ~SD_PREFER_SIBLING; in sd_init()
1390 t->flags |= SD_BALANCE_WAKE; in sd_init()
1393 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
1394 sd->imbalance_pct = 110; in sd_init()
1396 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1397 sd->imbalance_pct = 117; in sd_init()
1398 sd->cache_nice_tries = 1; in sd_init()
1401 } else if (sd->flags & SD_NUMA) { in sd_init()
1402 sd->cache_nice_tries = 2; in sd_init()
1404 sd->flags &= ~SD_PREFER_SIBLING; in sd_init()
1405 sd->flags |= SD_SERIALIZE; in sd_init()
1406 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { in sd_init()
1407 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
1414 sd->cache_nice_tries = 1; in sd_init()
1421 if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1422 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); in sd_init()
1423 atomic_inc(&sd->shared->ref); in sd_init()
1424 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); in sd_init()
1427 sd->private = sdd; in sd_init()
1433 * Topology list, bottom-up.
1450 for (tl = sched_domain_topology; tl->mask; tl++)
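
The list this loop walks is an array of sched_domain_topology_level entries terminated by a NULL ->mask, ordered bottom-up as the comment says. A reconstructed, abbreviated sketch of such a table, using the SMT/MC/DIE levels named earlier (details here are assumptions, not quotes from the matched lines):

/* Reconstructed sketch of a bottom-up, NULL-terminated topology table. */
static struct sched_domain_topology_level example_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

Architectures that need extra levels can install their own table via set_sched_topology().
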
1516 * - If the maximum distance between any nodes is 1 hop, the system
1518 * - If for two nodes A and B, located N > 1 hops away from each other,
1571 * O(nr_nodes^2) deduplicating selection sort -- in order to find the in sched_init_numa()
1594 sched_numa_warn("Node-distance not symmetric"); in sched_init_numa()
1597 sched_numa_warn("Node-0 not representative"); in sched_init_numa()
1700 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; in sched_init_numa()
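
The "O(nr_nodes^2) deduplicating selection sort" mentioned above amounts to collecting the distinct node_distance() values in ascending order, one NUMA level per value. A standalone sketch of that idea (the function name and the flat-array parameter are assumptions for the example):

/*
 * Collect the unique values of an n x n distance table in ascending
 * order into out[]; returns the number of levels found. Illustrative only.
 */
static int example_unique_distances(const int *dist, int n, int *out)
{
	int curr = 0, nr = 0;

	for (;;) {
		int i, j, next = -1;

		/* smallest distance strictly greater than the last one recorded */
		for (i = 0; i < n; i++) {
			for (j = 0; j < n; j++) {
				int d = dist[i * n + j];

				if (d > curr && (next < 0 || d < next))
					next = d;
			}
		}

		if (next < 0)
			break;
		out[nr++] = curr = next;
	}
	return nr;
}
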
1729 * sched_numa_find_closest() - given the NUMA topology, find the cpu
1756 struct sd_data *sdd = &tl->data; in __sdt_alloc()
1758 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
1759 if (!sdd->sd) in __sdt_alloc()
1760 return -ENOMEM; in __sdt_alloc()
1762 sdd->sds = alloc_percpu(struct sched_domain_shared *); in __sdt_alloc()
1763 if (!sdd->sds) in __sdt_alloc()
1764 return -ENOMEM; in __sdt_alloc()
1766 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
1767 if (!sdd->sg) in __sdt_alloc()
1768 return -ENOMEM; in __sdt_alloc()
1770 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
1771 if (!sdd->sgc) in __sdt_alloc()
1772 return -ENOMEM; in __sdt_alloc()
1783 return -ENOMEM; in __sdt_alloc()
1785 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
1790 return -ENOMEM; in __sdt_alloc()
1792 *per_cpu_ptr(sdd->sds, j) = sds; in __sdt_alloc()
1797 return -ENOMEM; in __sdt_alloc()
1799 sg->next = sg; in __sdt_alloc()
1801 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
1806 return -ENOMEM; in __sdt_alloc()
1809 sgc->id = j; in __sdt_alloc()
1812 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
1825 struct sd_data *sdd = &tl->data; in __sdt_free()
1830 if (sdd->sd) { in __sdt_free()
1831 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
1832 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
1833 free_sched_groups(sd->groups, 0); in __sdt_free()
1834 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
1837 if (sdd->sds) in __sdt_free()
1838 kfree(*per_cpu_ptr(sdd->sds, j)); in __sdt_free()
1839 if (sdd->sg) in __sdt_free()
1840 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
1841 if (sdd->sgc) in __sdt_free()
1842 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
1844 free_percpu(sdd->sd); in __sdt_free()
1845 sdd->sd = NULL; in __sdt_free()
1846 free_percpu(sdd->sds); in __sdt_free()
1847 sdd->sds = NULL; in __sdt_free()
1848 free_percpu(sdd->sg); in __sdt_free()
1849 sdd->sg = NULL; in __sdt_free()
1850 free_percpu(sdd->sgc); in __sdt_free()
1851 sdd->sgc = NULL; in __sdt_free()
1862 sd->level = child->level + 1; in build_sched_domain()
1863 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
1864 child->parent = sd; in build_sched_domain()
1871 child->name, sd->name); in build_sched_domain()
1923 for_each_cpu_and(j, tl->mask(i), cpu_map) { in asym_cpu_capacity_level()
1955 int i, ret = -ENOMEM; in build_sched_domains()
1985 if (tl->flags & SDTL_OVERLAP) in build_sched_domains()
1986 sd->flags |= SD_OVERLAP; in build_sched_domains()
1994 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
1995 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
1996 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
2007 for (i = nr_cpumask_bits-1; i >= 0; i--) { in build_sched_domains()
2011 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2024 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) in build_sched_domains()
2025 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); in build_sched_domains()
2036 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2219 * its dl_bw->total_bw needs to be cleared. It in partition_sched_domains_locked()
2223 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2228 /* No match - a current sched domain not in new doms_new[] */ in partition_sched_domains_locked()
2249 /* No match - add a new doms_new */ in partition_sched_domains_locked()
2260 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2265 /* No match - add perf. domains for a new rd */ in partition_sched_domains_locked()