Lines Matching +full:interleave +full:-mode (drivers/cxl/core/region.c)

1 // SPDX-License-Identifier: GPL-2.0-only
25 * 1. Interleave granularity
26 * 2. Interleave size
31 * All changes to the interleave configuration occur with this lock held
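
For context, the lock referenced above is the file-scope cxl_region_rwsem in region.c. A minimal sketch of the writer-side pattern, assuming that name; example_store_param() is a hypothetical helper, not a function from this file:

static DECLARE_RWSEM(cxl_region_rwsem);

/* hypothetical helper: interleave-config writers share this shape */
static int example_store_param(struct cxl_region *cxlr, int val)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		/* config freezes once endpoint targets start attaching */
		rc = -EBUSY;
		goto out;
	}
	p->interleave_ways = val;
out:
	up_write(&cxl_region_rwsem);
	return rc;
}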
42 struct cxl_region_params *p = &cxlr->params; in uuid_show()
48 rc = sysfs_emit(buf, "%pUb\n", &p->uuid); in uuid_show()
65 p = &cxlr->params; in is_dup()
67 if (uuid_equal(&p->uuid, uuid)) { in is_dup()
69 return -EBUSY; in is_dup()
79 struct cxl_region_params *p = &cxlr->params; in uuid_store()
84 return -EINVAL; in uuid_store()
91 return -EINVAL; in uuid_store()
97 if (uuid_equal(&p->uuid, &temp)) in uuid_store()
100 rc = -EBUSY; in uuid_store()
101 if (p->state >= CXL_CONFIG_ACTIVE) in uuid_store()
108 uuid_copy(&p->uuid, &temp); in uuid_store()
121 return xa_load(&port->regions, (unsigned long)cxlr); in cxl_rr_load()
126 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_reset()
129 for (i = count - 1; i >= 0; i--) { in cxl_region_decode_reset()
130 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_reset()
136 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_decode_reset()
137 iter = to_cxl_port(iter->dev.parent); in cxl_region_decode_reset()
140 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_reset()
145 cxld = cxl_rr->decoder; in cxl_region_decode_reset()
146 rc = cxld->reset(cxld); in cxl_region_decode_reset()
151 rc = cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_reset()
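
The reset path above performs an up-then-down walk of the port topology. A condensed sketch of that traversal, built from the helpers this listing already uses (cxled_to_port() is assumed from cxl.h, and the error handling of the real function is elided):

	struct cxl_port *iter = cxled_to_port(cxled);
	struct cxl_ep *ep;

	/* climb from the endpoint's port to just below the CXL root */
	while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
		iter = to_cxl_port(iter->dev.parent);

	/* descend, resetting the decoder this region holds at each port */
	for (ep = cxl_ep_load(iter, cxlmd); iter;
	     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
		struct cxl_region_ref *cxl_rr = cxl_rr_load(iter, cxlr);

		cxl_rr->decoder->reset(cxl_rr->decoder);
	}

Resetting top-down means upstream decode is torn down before the endpoint decoder itself is reset in the final step.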
161 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_commit()
164 for (i = 0; i < p->nr_targets; i++) { in cxl_region_decode_commit()
165 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_commit()
174 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_decode_commit()
176 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
177 if (cxld->commit) in cxl_region_decode_commit()
178 rc = cxld->commit(cxld); in cxl_region_decode_commit()
186 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_commit()
188 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
189 cxld->reset(cxld); in cxl_region_decode_commit()
192 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_commit()
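
A note on ordering in the commit path above, inferred from the loop directions in this listing:

/*
 * Commit walks bottom-up, from the endpoint decoder toward the root,
 * so no upstream decoder goes live before its downstream path is
 * programmed. On failure, the already-committed ports are walked back
 * down via ep->next and reset, mirroring cxl_region_decode_reset()
 * earlier in this listing, and the endpoint decoder is reset last.
 */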
209 struct cxl_region_params *p = &cxlr->params; in commit_store()
222 if (commit && p->state >= CXL_CONFIG_COMMIT) in commit_store()
224 if (!commit && p->state < CXL_CONFIG_COMMIT) in commit_store()
228 if (commit && p->state < CXL_CONFIG_ACTIVE) { in commit_store()
229 rc = -ENXIO; in commit_store()
236 p->state = CXL_CONFIG_RESET_PENDING; in commit_store()
238 device_release_driver(&cxlr->dev); in commit_store()
245 if (p->state == CXL_CONFIG_RESET_PENDING) in commit_store()
246 rc = cxl_region_decode_reset(cxlr, p->interleave_ways); in commit_store()
253 p->state = CXL_CONFIG_COMMIT; in commit_store()
254 else if (p->state == CXL_CONFIG_RESET_PENDING) in commit_store()
255 p->state = CXL_CONFIG_ACTIVE; in commit_store()
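
The >=/< state comparisons above depend on the declaration order of the configuration states. A sketch of that ordering, matching the cxl_config_state enum in cxl.h of the same vintage:

enum cxl_config_state {
	CXL_CONFIG_IDLE,
	CXL_CONFIG_INTERLEAVE_ACTIVE,
	CXL_CONFIG_ACTIVE,
	CXL_CONFIG_RESET_PENDING,
	CXL_CONFIG_COMMIT,
};

Everything at or above CXL_CONFIG_COMMIT reads back as committed, and a reset request parks the region at CXL_CONFIG_RESET_PENDING until decode teardown succeeds.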
269 struct cxl_region_params *p = &cxlr->params; in commit_show()
275 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); in commit_show()
288 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) in cxl_region_visible()
290 return a->mode; in cxl_region_visible()
297 struct cxl_region_params *p = &cxlr->params; in interleave_ways_show()
303 rc = sysfs_emit(buf, "%d\n", p->interleave_ways); in interleave_ways_show()
315 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_ways_store()
316 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_ways_store()
318 struct cxl_region_params *p = &cxlr->params; in interleave_ways_store()
332 * Even for x3, x9, and x12 interleaves the region interleave must be a in interleave_ways_store()
333 * power of 2 multiple of the host bridge interleave. in interleave_ways_store()
335 if (!is_power_of_2(val / cxld->interleave_ways) || in interleave_ways_store()
336 (val % cxld->interleave_ways)) { in interleave_ways_store()
337 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); in interleave_ways_store()
338 return -EINVAL; in interleave_ways_store()
344 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_ways_store()
345 rc = -EBUSY; in interleave_ways_store()
349 save = p->interleave_ways; in interleave_ways_store()
350 p->interleave_ways = val; in interleave_ways_store()
351 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in interleave_ways_store()
353 p->interleave_ways = save; in interleave_ways_store()
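
A worked restatement of the validity rule above, assuming a x3 host bridge; region_ways_valid() is an illustrative helper, not part of region.c (is_power_of_2() is from linux/log2.h):

/* accepts 3, 6, 12, 24, ... for hb_ways == 3; rejects 9 and 10 */
static bool region_ways_valid(unsigned int val, unsigned int hb_ways)
{
	return val && val % hb_ways == 0 && is_power_of_2(val / hb_ways);
}

The save/restore around sysfs_update_group() above then rolls interleave_ways back if the targetN attribute group cannot be resized to match.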
367 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_show()
373 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); in interleave_granularity_show()
383 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_granularity_store()
384 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_granularity_store()
386 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_store()
399 * When the host-bridge is interleaved, disallow region granularity != in interleave_granularity_store()
401 * interleave result in needing multiple endpoints to support a single in interleave_granularity_store()
402 * slot in the interleave (possible to support in the future). Regions in interleave_granularity_store()
403 * with a granularity greater than the root interleave result in invalid in interleave_granularity_store()
406 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) in interleave_granularity_store()
407 return -EINVAL; in interleave_granularity_store()
412 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_granularity_store()
413 rc = -EBUSY; in interleave_granularity_store()
417 p->interleave_granularity = val; in interleave_granularity_store()
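
To make the constraint above concrete:

/*
 * Example: a host bridge interleaved x2 at 256B (interleave_ways == 2,
 * interleave_granularity == 256) only accepts val == 256. val == 128
 * would need two endpoints to back a single 256B host-bridge slot;
 * val == 512 could not be decoded at all.
 */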
430 struct cxl_region_params *p = &cxlr->params; in resource_show()
431 u64 resource = -1ULL; in resource_show()
437 if (p->res) in resource_show()
438 resource = p->res->start; in resource_show()
448 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in alloc_hpa()
449 struct cxl_region_params *p = &cxlr->params; in alloc_hpa()
456 if (p->res && resource_size(p->res) == size) in alloc_hpa()
460 if (p->res) in alloc_hpa()
461 return -EBUSY; in alloc_hpa()
463 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) in alloc_hpa()
464 return -EBUSY; in alloc_hpa()
467 if (!p->interleave_ways || !p->interleave_granularity || in alloc_hpa()
468 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) in alloc_hpa()
469 return -ENXIO; in alloc_hpa()
471 div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder); in alloc_hpa()
473 return -EINVAL; in alloc_hpa()
475 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, in alloc_hpa()
476 dev_name(&cxlr->dev)); in alloc_hpa()
478 dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n", in alloc_hpa()
483 p->res = res; in alloc_hpa()
484 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in alloc_hpa()
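
For a sense of the arithmetic in alloc_hpa():

/*
 * The region must span a whole multiple of SZ_256M per interleave way.
 * E.g. with interleave_ways == 4 the minimum region is 1GB, carved out
 * of cxlrd->res at 256MB alignment, and each endpoint decoder later
 * contributes size / 4 worth of DPA.
 */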
491 struct cxl_region_params *p = &cxlr->params; in cxl_region_iomem_release()
493 if (device_is_registered(&cxlr->dev)) in cxl_region_iomem_release()
495 if (p->res) { in cxl_region_iomem_release()
496 remove_resource(p->res); in cxl_region_iomem_release()
497 kfree(p->res); in cxl_region_iomem_release()
498 p->res = NULL; in cxl_region_iomem_release()
504 struct cxl_region_params *p = &cxlr->params; in free_hpa()
508 if (!p->res) in free_hpa()
511 if (p->state >= CXL_CONFIG_ACTIVE) in free_hpa()
512 return -EBUSY; in free_hpa()
515 p->state = CXL_CONFIG_IDLE; in free_hpa()
550 struct cxl_region_params *p = &cxlr->params; in size_show()
557 if (p->res) in size_show()
558 size = resource_size(p->res); in size_show()
583 struct cxl_region_params *p = &cxlr->params; in show_targetN()
591 if (pos >= p->interleave_ways) { in show_targetN()
592 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in show_targetN()
593 p->interleave_ways); in show_targetN()
594 rc = -ENXIO; in show_targetN()
598 cxled = p->targets[pos]; in show_targetN()
602 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); in show_targetN()
620 if (cxld->id != *id) in match_free_decoder()
623 if (!cxld->region) in match_free_decoder()
637 dev = device_find_child(&port->dev, &id, match_free_decoder); in cxl_region_find_decoder()
653 struct cxl_region_params *p = &cxlr->params; in alloc_region_ref()
658 xa_for_each(&port->regions, index, iter) { in alloc_region_ref()
659 struct cxl_region_params *ip = &iter->region->params; in alloc_region_ref()
661 if (!ip->res) in alloc_region_ref()
664 if (ip->res->start > p->res->start) { in alloc_region_ref()
665 dev_dbg(&cxlr->dev, in alloc_region_ref()
667 dev_name(&port->dev), in alloc_region_ref()
668 dev_name(&iter->region->dev), ip->res, p->res); in alloc_region_ref()
669 return ERR_PTR(-EBUSY); in alloc_region_ref()
675 return ERR_PTR(-ENOMEM); in alloc_region_ref()
676 cxl_rr->port = port; in alloc_region_ref()
677 cxl_rr->region = cxlr; in alloc_region_ref()
678 cxl_rr->nr_targets = 1; in alloc_region_ref()
679 xa_init(&cxl_rr->endpoints); in alloc_region_ref()
681 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); in alloc_region_ref()
683 dev_dbg(&cxlr->dev, in alloc_region_ref()
685 dev_name(&port->dev), rc); in alloc_region_ref()
695 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_free_decoder()
696 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_free_decoder()
701 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); in cxl_rr_free_decoder()
702 if (cxld->region == cxlr) { in cxl_rr_free_decoder()
703 cxld->region = NULL; in cxl_rr_free_decoder()
704 put_device(&cxlr->dev); in cxl_rr_free_decoder()
710 struct cxl_port *port = cxl_rr->port; in free_region_ref()
711 struct cxl_region *cxlr = cxl_rr->region; in free_region_ref()
714 xa_erase(&port->regions, (unsigned long)cxlr); in free_region_ref()
715 xa_destroy(&cxl_rr->endpoints); in free_region_ref()
723 struct cxl_port *port = cxl_rr->port; in cxl_rr_ep_add()
724 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_ep_add()
725 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_ep_add()
729 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, in cxl_rr_ep_add()
734 cxl_rr->nr_eps++; in cxl_rr_ep_add()
736 if (!cxld->region) { in cxl_rr_ep_add()
737 cxld->region = cxlr; in cxl_rr_ep_add()
738 get_device(&cxlr->dev); in cxl_rr_ep_add()
751 cxld = &cxled->cxld; in cxl_rr_alloc_decoder()
755 dev_dbg(&cxlr->dev, "%s: no decoder available\n", in cxl_rr_alloc_decoder()
756 dev_name(&port->dev)); in cxl_rr_alloc_decoder()
757 return -EBUSY; in cxl_rr_alloc_decoder()
760 if (cxld->region) { in cxl_rr_alloc_decoder()
761 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", in cxl_rr_alloc_decoder()
762 dev_name(&port->dev), dev_name(&cxld->dev), in cxl_rr_alloc_decoder()
763 dev_name(&cxld->region->dev)); in cxl_rr_alloc_decoder()
764 return -EBUSY; in cxl_rr_alloc_decoder()
767 cxl_rr->decoder = cxld; in cxl_rr_alloc_decoder()
772 * cxl_port_attach_region() - track a region's interest in a port by endpoint
776 * @pos: interleave position of @cxled in @cxlr
784 * - validate that there are no other regions with a higher HPA already
786 * - establish a region reference if one is not already present
788 * - additionally allocate a decoder instance that will host @cxlr on
791 * - pin the region reference by the endpoint
792 * - account for how many entries in @port's target list are needed to
805 int rc = -EBUSY; in cxl_port_attach_region()
820 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_attach_region()
823 if (ep_iter->next == ep->next) { in cxl_port_attach_region()
833 if (!found || !ep->next) { in cxl_port_attach_region()
834 cxl_rr->nr_targets++; in cxl_port_attach_region()
840 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
842 dev_name(&port->dev)); in cxl_port_attach_region()
851 cxld = cxl_rr->decoder; in cxl_port_attach_region()
855 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
857 dev_name(&port->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
858 dev_name(&cxld->dev)); in cxl_port_attach_region()
862 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
864 dev_name(port->uport), dev_name(&port->dev), in cxl_port_attach_region()
865 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
866 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
867 ep ? ep->next ? dev_name(ep->next->uport) : in cxl_port_attach_region()
868 dev_name(&cxlmd->dev) : in cxl_port_attach_region()
870 cxl_rr->nr_eps, cxl_rr->nr_targets); in cxl_port_attach_region()
875 cxl_rr->nr_targets--; in cxl_port_attach_region()
876 if (cxl_rr->nr_eps == 0) in cxl_port_attach_region()
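
The accounting steps listed in the kernel-doc above are recorded in struct cxl_region_ref. A paraphrase of the fields this listing exercises, not the authoritative definition:

struct cxl_region_ref {
	struct cxl_port *port;        /* port tracking the region */
	struct cxl_region *region;    /* region with decode interest here */
	struct cxl_decoder *decoder;  /* decoder hosting @region at @port */
	struct xarray endpoints;      /* endpoint decoders pinning the ref */
	int nr_eps;                   /* endpoints accounted at this port */
	int nr_targets_set;           /* target-list entries programmed */
	int nr_targets;               /* target-list entries required */
};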
898 if (cxl_rr->decoder == &cxled->cxld) in cxl_port_detach_region()
899 cxl_rr->nr_eps--; in cxl_port_detach_region()
901 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); in cxl_port_detach_region()
907 cxl_rr->nr_eps--; in cxl_port_detach_region()
908 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_detach_region()
909 if (ep_iter->next == ep->next) { in cxl_port_detach_region()
915 cxl_rr->nr_targets--; in cxl_port_detach_region()
918 if (cxl_rr->nr_eps == 0) in cxl_port_detach_region()
927 struct cxl_region *cxlr = cxl_rr->region; in check_last_peer()
928 struct cxl_region_params *p = &cxlr->params; in check_last_peer()
930 struct cxl_port *port = cxl_rr->port; in check_last_peer()
933 int pos = cxled->pos; in check_last_peer()
937 * then that endpoint, at index 'position - distance', must also be in check_last_peer()
941 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", in check_last_peer()
942 dev_name(port->uport), dev_name(&port->dev), in check_last_peer()
943 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in check_last_peer()
944 return -ENXIO; in check_last_peer()
946 cxled_peer = p->targets[pos - distance]; in check_last_peer()
949 if (ep->dport != ep_peer->dport) { in check_last_peer()
950 dev_dbg(&cxlr->dev, in check_last_peer()
952 dev_name(port->uport), dev_name(&port->dev), in check_last_peer()
953 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, in check_last_peer()
954 dev_name(&cxlmd_peer->dev), in check_last_peer()
955 dev_name(&cxled_peer->cxld.dev)); in check_last_peer()
956 return -ENXIO; in check_last_peer()
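
An example of the position/distance invariant checked above (my reading of the check, so treat as illustrative):

/*
 * In a x4 region where @port reaches only two of the four endpoints,
 * distance == 2. An endpoint claiming region position 3 through @port
 * is valid only if the endpoint at position 1 (pos - distance) already
 * routes through the same downstream port; otherwise the interleave
 * would be misaligned at this switch.
 */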
966 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_port_setup_targets()
967 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; in cxl_port_setup_targets()
968 struct cxl_port *parent_port = to_cxl_port(port->dev.parent); in cxl_port_setup_targets()
972 struct cxl_region_params *p = &cxlr->params; in cxl_port_setup_targets()
973 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_port_setup_targets()
982 if (!is_power_of_2(cxl_rr->nr_targets)) { in cxl_port_setup_targets()
983 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", in cxl_port_setup_targets()
984 dev_name(port->uport), dev_name(&port->dev), in cxl_port_setup_targets()
985 cxl_rr->nr_targets); in cxl_port_setup_targets()
986 return -EINVAL; in cxl_port_setup_targets()
989 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_setup_targets()
990 if (cxl_rr->nr_targets_set) { in cxl_port_setup_targets()
997 if (port->nr_dports == 1) in cxl_port_setup_targets()
1000 distance = p->nr_targets / cxl_rr->nr_targets; in cxl_port_setup_targets()
1001 for (i = 0; i < cxl_rr->nr_targets_set; i++) in cxl_port_setup_targets()
1002 if (ep->dport == cxlsd->target[i]) { in cxl_port_setup_targets()
1013 parent_ig = cxlrd->cxlsd.cxld.interleave_granularity; in cxl_port_setup_targets()
1014 parent_iw = cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1016 * For purposes of address bit routing, use power-of-2 math for in cxl_port_setup_targets()
1026 parent_cxld = parent_rr->decoder; in cxl_port_setup_targets()
1027 parent_ig = parent_cxld->interleave_granularity; in cxl_port_setup_targets()
1028 parent_iw = parent_cxld->interleave_ways; in cxl_port_setup_targets()
1033 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", in cxl_port_setup_targets()
1034 dev_name(parent_port->uport), in cxl_port_setup_targets()
1035 dev_name(&parent_port->dev), parent_ig); in cxl_port_setup_targets()
1041 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", in cxl_port_setup_targets()
1042 dev_name(parent_port->uport), in cxl_port_setup_targets()
1043 dev_name(&parent_port->dev), parent_iw); in cxl_port_setup_targets()
1047 iw = cxl_rr->nr_targets; in cxl_port_setup_targets()
1050 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", in cxl_port_setup_targets()
1051 dev_name(port->uport), dev_name(&port->dev), iw); in cxl_port_setup_targets()
1059 if (parent_iw > 1 && cxl_rr->nr_targets > 1) { in cxl_port_setup_targets()
1062 eig = address_bit - eiw + 1; in cxl_port_setup_targets()
1070 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", in cxl_port_setup_targets()
1071 dev_name(port->uport), dev_name(&port->dev), in cxl_port_setup_targets()
1076 cxld->interleave_ways = iw; in cxl_port_setup_targets()
1077 cxld->interleave_granularity = ig; in cxl_port_setup_targets()
1078 cxld->hpa_range = (struct range) { in cxl_port_setup_targets()
1079 .start = p->res->start, in cxl_port_setup_targets()
1080 .end = p->res->end, in cxl_port_setup_targets()
1082 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport), in cxl_port_setup_targets()
1083 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1085 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { in cxl_port_setup_targets()
1086 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1088 dev_name(port->uport), dev_name(&port->dev), in cxl_port_setup_targets()
1089 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1090 return -ENXIO; in cxl_port_setup_targets()
1092 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; in cxl_port_setup_targets()
1095 cxl_rr->nr_targets_set += inc; in cxl_port_setup_targets()
1096 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", in cxl_port_setup_targets()
1097 dev_name(port->uport), dev_name(&port->dev), in cxl_port_setup_targets()
1098 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport), in cxl_port_setup_targets()
1099 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
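
A worked pass of the address-bit routing above, assuming the CXL power-of-2 encodings (granularity bytes = 256 << eig, ways = 1 << eiw, interleave selection starting at HPA bit 8):

/*
 * Parent decodes x2 at 256B (peiw = 1, peig = 0), consuming HPA bit 8.
 * This port also fans out x2 (eiw = 1), so it must select targets on
 * the next unused bit, bit 9, i.e. eig = 1 and a 512B granularity:
 * the port sees every other 256B cycle from its parent and splits the
 * stream it does see across its own two targets.
 */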
1116 cxl_rr->nr_targets_set = 0; in cxl_port_reset_targets()
1118 cxld = cxl_rr->decoder; in cxl_port_reset_targets()
1119 cxld->hpa_range = (struct range) { in cxl_port_reset_targets()
1121 .end = -1, in cxl_port_reset_targets()
1127 struct cxl_region_params *p = &cxlr->params; in cxl_region_teardown_targets()
1134 for (i = 0; i < p->nr_targets; i++) { in cxl_region_teardown_targets()
1135 cxled = p->targets[i]; in cxl_region_teardown_targets()
1139 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_teardown_targets()
1140 iter = to_cxl_port(iter->dev.parent); in cxl_region_teardown_targets()
1143 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) in cxl_region_teardown_targets()
1150 struct cxl_region_params *p = &cxlr->params; in cxl_region_setup_targets()
1157 for (i = 0; i < p->nr_targets; i++) { in cxl_region_setup_targets()
1158 cxled = p->targets[i]; in cxl_region_setup_targets()
1162 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_setup_targets()
1163 iter = to_cxl_port(iter->dev.parent); in cxl_region_setup_targets()
1170 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_setup_targets()
1185 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_region_attach()
1188 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach()
1190 int i, rc = -ENXIO; in cxl_region_attach()
1192 if (cxled->mode == CXL_DECODER_DEAD) { in cxl_region_attach()
1193 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1194 return -ENODEV; in cxl_region_attach()
1197 /* all full of members, or interleave config not established? */ in cxl_region_attach()
1198 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1199 dev_dbg(&cxlr->dev, "region already active\n"); in cxl_region_attach()
1200 return -EBUSY; in cxl_region_attach()
1201 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1202 dev_dbg(&cxlr->dev, "interleave config missing\n"); in cxl_region_attach()
1203 return -ENXIO; in cxl_region_attach()
1206 if (pos < 0 || pos >= p->interleave_ways) { in cxl_region_attach()
1207 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in cxl_region_attach()
1208 p->interleave_ways); in cxl_region_attach()
1209 return -ENXIO; in cxl_region_attach()
1212 if (p->targets[pos] == cxled) in cxl_region_attach()
1215 if (p->targets[pos]) { in cxl_region_attach()
1216 struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; in cxl_region_attach()
1219 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", in cxl_region_attach()
1220 pos, dev_name(&cxlmd_target->dev), in cxl_region_attach()
1221 dev_name(&cxled_target->cxld.dev)); in cxl_region_attach()
1222 return -EBUSY; in cxl_region_attach()
1225 for (i = 0; i < p->interleave_ways; i++) { in cxl_region_attach()
1229 cxled_target = p->targets[pos]; in cxl_region_attach()
1235 dev_dbg(&cxlr->dev, in cxl_region_attach()
1237 dev_name(&cxlmd->dev), pos, in cxl_region_attach()
1238 dev_name(&cxled_target->cxld.dev)); in cxl_region_attach()
1239 return -EBUSY; in cxl_region_attach()
1245 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); in cxl_region_attach()
1247 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", in cxl_region_attach()
1248 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1249 dev_name(cxlr->dev.parent)); in cxl_region_attach()
1250 return -ENXIO; in cxl_region_attach()
1253 if (cxlrd->calc_hb(cxlrd, pos) != dport) { in cxl_region_attach()
1254 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", in cxl_region_attach()
1255 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1256 dev_name(&cxlrd->cxlsd.cxld.dev)); in cxl_region_attach()
1257 return -ENXIO; in cxl_region_attach()
1260 if (cxled->cxld.target_type != cxlr->type) { in cxl_region_attach()
1261 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", in cxl_region_attach()
1262 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1263 cxled->cxld.target_type, cxlr->type); in cxl_region_attach()
1264 return -ENXIO; in cxl_region_attach()
1267 if (!cxled->dpa_res) { in cxl_region_attach()
1268 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", in cxl_region_attach()
1269 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1270 return -ENXIO; in cxl_region_attach()
1273 if (resource_size(cxled->dpa_res) * p->interleave_ways != in cxl_region_attach()
1274 resource_size(p->res)) { in cxl_region_attach()
1275 dev_dbg(&cxlr->dev, in cxl_region_attach()
1276 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", in cxl_region_attach()
1277 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1278 (u64)resource_size(cxled->dpa_res), p->interleave_ways, in cxl_region_attach()
1279 (u64)resource_size(p->res)); in cxl_region_attach()
1280 return -EINVAL; in cxl_region_attach()
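
To make the size invariant concrete:

/*
 * Example: a 512MB region (p->res) interleaved x2 requires each
 * endpoint decoder's DPA allocation to be exactly 256MB, since
 * 256MB * 2 ways == 512MB; any other decoder size fails with -EINVAL.
 */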
1284 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_attach()
1290 p->targets[pos] = cxled; in cxl_region_attach()
1291 cxled->pos = pos; in cxl_region_attach()
1292 p->nr_targets++; in cxl_region_attach()
1294 if (p->nr_targets == p->interleave_ways) { in cxl_region_attach()
1298 p->state = CXL_CONFIG_ACTIVE; in cxl_region_attach()
1301 cxled->cxld.interleave_ways = p->interleave_ways; in cxl_region_attach()
1302 cxled->cxld.interleave_granularity = p->interleave_granularity; in cxl_region_attach()
1303 cxled->cxld.hpa_range = (struct range) { in cxl_region_attach()
1304 .start = p->res->start, in cxl_region_attach()
1305 .end = p->res->end, in cxl_region_attach()
1311 p->nr_targets--; in cxl_region_attach()
1314 iter = to_cxl_port(iter->dev.parent)) in cxl_region_attach()
1322 struct cxl_region *cxlr = cxled->cxld.region; in cxl_region_detach()
1331 p = &cxlr->params; in cxl_region_detach()
1332 get_device(&cxlr->dev); in cxl_region_detach()
1334 if (p->state > CXL_CONFIG_ACTIVE) { in cxl_region_detach()
1339 rc = cxl_region_decode_reset(cxlr, p->interleave_ways); in cxl_region_detach()
1342 p->state = CXL_CONFIG_ACTIVE; in cxl_region_detach()
1346 iter = to_cxl_port(iter->dev.parent)) in cxl_region_detach()
1349 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || in cxl_region_detach()
1350 p->targets[cxled->pos] != cxled) { in cxl_region_detach()
1353 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", in cxl_region_detach()
1354 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_detach()
1355 cxled->pos); in cxl_region_detach()
1359 if (p->state == CXL_CONFIG_ACTIVE) { in cxl_region_detach()
1360 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in cxl_region_detach()
1363 p->targets[cxled->pos] = NULL; in cxl_region_detach()
1364 p->nr_targets--; in cxl_region_detach()
1365 cxled->cxld.hpa_range = (struct range) { in cxl_region_detach()
1367 .end = -1, in cxl_region_detach()
1372 device_release_driver(&cxlr->dev); in cxl_region_detach()
1375 put_device(&cxlr->dev); in cxl_region_detach()
1382 cxled->mode = CXL_DECODER_DEAD; in cxl_decoder_kill_region()
1394 return -ENODEV; in attach_target()
1398 return -EINVAL; in attach_target()
1415 struct cxl_region_params *p = &cxlr->params; in detach_target()
1422 if (pos >= p->interleave_ways) { in detach_target()
1423 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in detach_target()
1424 p->interleave_ways); in detach_target()
1425 rc = -ENXIO; in detach_target()
1429 if (!p->targets[pos]) { in detach_target()
1434 rc = cxl_region_detach(p->targets[pos]); in detach_target()
1511 struct cxl_region_params *p = &cxlr->params; in cxl_region_target_visible()
1513 if (n < p->interleave_ways) in cxl_region_target_visible()
1514 return a->mode; in cxl_region_target_visible()
1537 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in cxl_region_release()
1539 int id = atomic_read(&cxlrd->region_id); in cxl_region_release()
1546 if (cxlr->id < id) in cxl_region_release()
1547 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { in cxl_region_release()
1552 memregion_free(cxlr->id); in cxl_region_release()
1554 put_device(dev->parent); in cxl_region_release()
1566 return dev->type == &cxl_region_type; in is_cxl_region()
1572 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, in to_cxl_region()
1582 struct cxl_region_params *p = &cxlr->params; in unregister_region()
1589 * read-only, so no need to hold the region rwsem to access the in unregister_region()
1592 for (i = 0; i < p->interleave_ways; i++) in unregister_region()
1609 return ERR_PTR(-ENOMEM); in cxl_region_alloc()
1612 dev = &cxlr->dev; in cxl_region_alloc()
1614 lockdep_set_class(&dev->mutex, &cxl_region_key); in cxl_region_alloc()
1615 dev->parent = &cxlrd->cxlsd.cxld.dev; in cxl_region_alloc()
1620 get_device(dev->parent); in cxl_region_alloc()
1622 dev->bus = &cxl_bus_type; in cxl_region_alloc()
1623 dev->type = &cxl_region_type; in cxl_region_alloc()
1624 cxlr->id = id; in cxl_region_alloc()
1630 * devm_cxl_add_region - Adds a region to a decoder
1633 * @mode: mode for the endpoint decoders of this region
1634 * @type: select whether this is an expander or accelerator (type-2 or type-3)
1644 enum cxl_decoder_mode mode, in devm_cxl_add_region() argument
1647 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); in devm_cxl_add_region()
1655 cxlr->mode = mode; in devm_cxl_add_region()
1656 cxlr->type = type; in devm_cxl_add_region()
1658 dev = &cxlr->dev; in devm_cxl_add_region()
1667 rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr); in devm_cxl_add_region()
1671 dev_dbg(port->uport, "%s: created %s\n", in devm_cxl_add_region()
1672 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); in devm_cxl_add_region()
1685 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); in create_pmem_region_show()
1698 return -EINVAL; in create_pmem_region_store()
1704 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { in create_pmem_region_store()
1706 return -EBUSY; in create_pmem_region_store()
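
The create flow above advertises the next region id to userspace and then claims it atomically on write-back. A condensed sketch, assuming memregion_alloc()/memregion_free() from lib/memregion.c:

	rc = memregion_alloc(GFP_KERNEL);  /* reserve a fresh id */
	if (rc < 0)
		return rc;
	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		/* another writer consumed the advertised id first */
		memregion_free(rc);
		return -EBUSY;
	}
	/* @id, as advertised and confirmed, now names the new region */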
1728 if (cxld->region) in region_show()
1729 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); in region_show()
1741 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in cxl_find_region_by_name()
1744 region_dev = device_find_child_by_name(&cxld->dev, name); in cxl_find_region_by_name()
1746 return ERR_PTR(-ENODEV); in cxl_find_region_by_name()
1756 struct cxl_port *port = to_cxl_port(dev->parent); in delete_region_store()
1763 devm_release_action(port->uport, unregister_region, cxlr); in delete_region_store()
1764 put_device(&cxlr->dev); in delete_region_store()
1775 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { in cxl_pmem_region_release()
1776 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; in cxl_pmem_region_release()
1778 put_device(&cxlmd->dev); in cxl_pmem_region_release()
1797 return dev->type == &cxl_pmem_region_type; in is_cxl_pmem_region()
1814 struct cxl_region_params *p = &cxlr->params; in cxl_pmem_region_alloc()
1820 if (p->state != CXL_CONFIG_COMMIT) { in cxl_pmem_region_alloc()
1821 cxlr_pmem = ERR_PTR(-ENXIO); in cxl_pmem_region_alloc()
1825 cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), in cxl_pmem_region_alloc()
1828 cxlr_pmem = ERR_PTR(-ENOMEM); in cxl_pmem_region_alloc()
1832 cxlr_pmem->hpa_range.start = p->res->start; in cxl_pmem_region_alloc()
1833 cxlr_pmem->hpa_range.end = p->res->end; in cxl_pmem_region_alloc()
1836 cxlr_pmem->nr_mappings = p->nr_targets; in cxl_pmem_region_alloc()
1837 for (i = 0; i < p->nr_targets; i++) { in cxl_pmem_region_alloc()
1838 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_pmem_region_alloc()
1840 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; in cxl_pmem_region_alloc()
1842 m->cxlmd = cxlmd; in cxl_pmem_region_alloc()
1843 get_device(&cxlmd->dev); in cxl_pmem_region_alloc()
1844 m->start = cxled->dpa_res->start; in cxl_pmem_region_alloc()
1845 m->size = resource_size(cxled->dpa_res); in cxl_pmem_region_alloc()
1846 m->position = i; in cxl_pmem_region_alloc()
1849 dev = &cxlr_pmem->dev; in cxl_pmem_region_alloc()
1850 cxlr_pmem->cxlr = cxlr; in cxl_pmem_region_alloc()
1852 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); in cxl_pmem_region_alloc()
1854 dev->parent = &cxlr->dev; in cxl_pmem_region_alloc()
1855 dev->bus = &cxl_bus_type; in cxl_pmem_region_alloc()
1856 dev->type = &cxl_pmem_region_type; in cxl_pmem_region_alloc()
1869 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
1884 dev = &cxlr_pmem->dev; in devm_cxl_add_pmem_region()
1885 rc = dev_set_name(dev, "pmem_region%d", cxlr->id); in devm_cxl_add_pmem_region()
1893 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_pmem_region()
1896 return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev); in devm_cxl_add_pmem_region()
1906 struct cxl_region_params *p = &cxlr->params; in cxl_region_probe()
1911 dev_dbg(&cxlr->dev, "probe interrupted\n"); in cxl_region_probe()
1915 if (p->state < CXL_CONFIG_COMMIT) { in cxl_region_probe()
1916 dev_dbg(&cxlr->dev, "config state: %d\n", p->state); in cxl_region_probe()
1917 rc = -ENXIO; in cxl_region_probe()
1926 switch (cxlr->mode) { in cxl_region_probe()
1930 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n", in cxl_region_probe()
1931 cxlr->mode); in cxl_region_probe()
1932 return -ENXIO; in cxl_region_probe()
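
The search filter hides the switch's non-default branch (its lines contain neither match term). A sketch of the elided case, consistent with the devm_cxl_add_pmem_region() bridge registered earlier in this listing:

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}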