1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/platform_device.h>
4 #include <linux/module.h>
5 #include <linux/device.h>
6 #include <linux/kernel.h>
7 #include <linux/acpi.h>
8 #include <linux/pci.h>
9 #include "cxlpci.h"
10 #include "cxl.h"
11
cfmws_to_decoder_flags(int restrictions)12 static unsigned long cfmws_to_decoder_flags(int restrictions)
13 {
14 unsigned long flags = CXL_DECODER_F_ENABLE;
15
16 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
17 flags |= CXL_DECODER_F_TYPE2;
18 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
19 flags |= CXL_DECODER_F_TYPE3;
20 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
21 flags |= CXL_DECODER_F_RAM;
22 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
23 flags |= CXL_DECODER_F_PMEM;
24 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
25 flags |= CXL_DECODER_F_LOCK;
26
27 return flags;
28 }
29
cxl_acpi_cfmws_verify(struct device * dev,struct acpi_cedt_cfmws * cfmws)30 static int cxl_acpi_cfmws_verify(struct device *dev,
31 struct acpi_cedt_cfmws *cfmws)
32 {
33 int rc, expected_len;
34 unsigned int ways;
35
36 if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
37 dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
38 return -EINVAL;
39 }
40
41 if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
42 dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
43 return -EINVAL;
44 }
45
46 if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
47 dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
48 return -EINVAL;
49 }
50
51 rc = cxl_to_ways(cfmws->interleave_ways, &ways);
52 if (rc) {
53 dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
54 cfmws->interleave_ways);
55 return -EINVAL;
56 }
57
58 expected_len = struct_size(cfmws, interleave_targets, ways);
59
60 if (cfmws->header.length < expected_len) {
61 dev_err(dev, "CFMWS length %d less than expected %d\n",
62 cfmws->header.length, expected_len);
63 return -EINVAL;
64 }
65
66 if (cfmws->header.length > expected_len)
67 dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
68 cfmws->header.length, expected_len);
69
70 return 0;
71 }
72
/* State handed to cxl_parse_cfmws() for each CEDT CFMWS entry */
struct cxl_cfmws_context {
	struct device *dev;		/* host (ACPI0017) device, for log messages */
	struct cxl_port *root_port;	/* root port that parents new root decoders */
	struct resource *cxl_res;	/* private tree collecting CXL windows */
	int id;				/* next ordinal for the "CXL Window %d" name */
};
79
cxl_parse_cfmws(union acpi_subtable_headers * header,void * arg,const unsigned long end)80 static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
81 const unsigned long end)
82 {
83 int target_map[CXL_DECODER_MAX_INTERLEAVE];
84 struct cxl_cfmws_context *ctx = arg;
85 struct cxl_port *root_port = ctx->root_port;
86 struct resource *cxl_res = ctx->cxl_res;
87 struct cxl_root_decoder *cxlrd;
88 struct device *dev = ctx->dev;
89 struct acpi_cedt_cfmws *cfmws;
90 struct cxl_decoder *cxld;
91 unsigned int ways, i, ig;
92 struct resource *res;
93 int rc;
94
95 cfmws = (struct acpi_cedt_cfmws *) header;
96
97 rc = cxl_acpi_cfmws_verify(dev, cfmws);
98 if (rc) {
99 dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
100 cfmws->base_hpa,
101 cfmws->base_hpa + cfmws->window_size - 1);
102 return 0;
103 }
104
105 rc = cxl_to_ways(cfmws->interleave_ways, &ways);
106 if (rc)
107 return rc;
108 rc = cxl_to_granularity(cfmws->granularity, &ig);
109 if (rc)
110 return rc;
111 for (i = 0; i < ways; i++)
112 target_map[i] = cfmws->interleave_targets[i];
113
114 res = kzalloc(sizeof(*res), GFP_KERNEL);
115 if (!res)
116 return -ENOMEM;
117
118 res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
119 if (!res->name)
120 goto err_name;
121
122 res->start = cfmws->base_hpa;
123 res->end = cfmws->base_hpa + cfmws->window_size - 1;
124 res->flags = IORESOURCE_MEM;
125
126 /* add to the local resource tracking to establish a sort order */
127 rc = insert_resource(cxl_res, res);
128 if (rc)
129 goto err_insert;
130
131 cxlrd = cxl_root_decoder_alloc(root_port, ways);
132 if (IS_ERR(cxlrd))
133 return 0;
134
135 cxld = &cxlrd->cxlsd.cxld;
136 cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
137 cxld->target_type = CXL_DECODER_EXPANDER;
138 cxld->hpa_range = (struct range) {
139 .start = res->start,
140 .end = res->end,
141 };
142 cxld->interleave_ways = ways;
143 /*
144 * Minimize the x1 granularity to advertise support for any
145 * valid region granularity
146 */
147 if (ways == 1)
148 ig = CXL_DECODER_MIN_GRANULARITY;
149 cxld->interleave_granularity = ig;
150
151 rc = cxl_decoder_add(cxld, target_map);
152 if (rc)
153 put_device(&cxld->dev);
154 else
155 rc = cxl_decoder_autoremove(dev, cxld);
156 if (rc) {
157 dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
158 cxld->hpa_range.start, cxld->hpa_range.end);
159 return 0;
160 }
161 dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
162 dev_name(&cxld->dev),
163 phys_to_target_node(cxld->hpa_range.start),
164 cxld->hpa_range.start, cxld->hpa_range.end);
165
166 return 0;
167
168 err_insert:
169 kfree(res->name);
170 err_name:
171 kfree(res);
172 return -ENOMEM;
173 }
174
to_cxl_host_bridge(struct device * host,struct device * dev)175 __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
176 struct device *dev)
177 {
178 struct acpi_device *adev = to_acpi_device(dev);
179
180 if (!acpi_pci_find_root(adev->handle))
181 return NULL;
182
183 if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
184 return adev;
185 return NULL;
186 }
187
188 /*
189 * A host bridge is a dport to a CFMWS decode and it is a uport to the
190 * dport (PCIe Root Ports) in the host bridge.
191 */
add_host_bridge_uport(struct device * match,void * arg)192 static int add_host_bridge_uport(struct device *match, void *arg)
193 {
194 struct cxl_port *root_port = arg;
195 struct device *host = root_port->dev.parent;
196 struct acpi_device *bridge = to_cxl_host_bridge(host, match);
197 struct acpi_pci_root *pci_root;
198 struct cxl_dport *dport;
199 struct cxl_port *port;
200 int rc;
201
202 if (!bridge)
203 return 0;
204
205 dport = cxl_find_dport_by_dev(root_port, match);
206 if (!dport) {
207 dev_dbg(host, "host bridge expected and not found\n");
208 return 0;
209 }
210
211 /*
212 * Note that this lookup already succeeded in
213 * to_cxl_host_bridge(), so no need to check for failure here
214 */
215 pci_root = acpi_pci_find_root(bridge->handle);
216 rc = devm_cxl_register_pci_bus(host, match, pci_root->bus);
217 if (rc)
218 return rc;
219
220 port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
221 if (IS_ERR(port))
222 return PTR_ERR(port);
223 dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
224
225 return 0;
226 }
227
/* In/out state for the CHBS walk performed by cxl_get_chbcr() */
struct cxl_chbs_context {
	struct device *dev;		/* host device (not read by the callback itself) */
	unsigned long long uid;		/* in: _UID of the host bridge to match */
	resource_size_t chbcr;		/* out: component register base, 0 if no match */
};
233
cxl_get_chbcr(union acpi_subtable_headers * header,void * arg,const unsigned long end)234 static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
235 const unsigned long end)
236 {
237 struct cxl_chbs_context *ctx = arg;
238 struct acpi_cedt_chbs *chbs;
239
240 if (ctx->chbcr)
241 return 0;
242
243 chbs = (struct acpi_cedt_chbs *) header;
244
245 if (ctx->uid != chbs->uid)
246 return 0;
247 ctx->chbcr = chbs->base;
248
249 return 0;
250 }
251
add_host_bridge_dport(struct device * match,void * arg)252 static int add_host_bridge_dport(struct device *match, void *arg)
253 {
254 acpi_status status;
255 unsigned long long uid;
256 struct cxl_dport *dport;
257 struct cxl_chbs_context ctx;
258 struct cxl_port *root_port = arg;
259 struct device *host = root_port->dev.parent;
260 struct acpi_device *bridge = to_cxl_host_bridge(host, match);
261
262 if (!bridge)
263 return 0;
264
265 status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
266 &uid);
267 if (status != AE_OK) {
268 dev_err(host, "unable to retrieve _UID of %s\n",
269 dev_name(match));
270 return -ENODEV;
271 }
272
273 ctx = (struct cxl_chbs_context) {
274 .dev = host,
275 .uid = uid,
276 };
277 acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
278
279 if (ctx.chbcr == 0) {
280 dev_warn(host, "No CHBS found for Host Bridge: %s\n",
281 dev_name(match));
282 return 0;
283 }
284
285 dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr);
286 if (IS_ERR(dport)) {
287 dev_err(host, "failed to add downstream port: %s\n",
288 dev_name(match));
289 return PTR_ERR(dport);
290 }
291 dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
292 return 0;
293 }
294
add_root_nvdimm_bridge(struct device * match,void * data)295 static int add_root_nvdimm_bridge(struct device *match, void *data)
296 {
297 struct cxl_decoder *cxld;
298 struct cxl_port *root_port = data;
299 struct cxl_nvdimm_bridge *cxl_nvb;
300 struct device *host = root_port->dev.parent;
301
302 if (!is_root_decoder(match))
303 return 0;
304
305 cxld = to_cxl_decoder(match);
306 if (!(cxld->flags & CXL_DECODER_F_PMEM))
307 return 0;
308
309 cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
310 if (IS_ERR(cxl_nvb)) {
311 dev_dbg(host, "failed to register pmem\n");
312 return PTR_ERR(cxl_nvb);
313 }
314 dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
315 dev_name(&cxl_nvb->dev));
316 return 1;
317 }
318
/* Dedicated lockdep class for the CXL root device's device_lock */
static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	/* devm action: restore the default device_lock class on unbind */
	device_lock_reset_class(dev);
}
325
/*
 * Free a private cxl_res entry; the name was allocated by kasprintf()
 * in cxl_parse_cfmws(), so it is freed along with the resource.
 */
static void del_cxl_resource(struct resource *res)
{
	kfree(res->name);
	kfree(res);
}
331
/*
 * Stash a pointer to the public (iomem_resource) copy in the private
 * cxl_res entry; the private tree's ->desc field is unused otherwise
 * and is repurposed as the link.
 */
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}
336
/* Retrieve the public copy stored by cxl_set_public_resource(), or NULL */
static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}
341
/*
 * devm action: tear down all CFMWS windows, removing both the public
 * copy from iomem_resource and the private entry from the cxl_res tree.
 */
static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		/* save the sibling link before remove_resource() unlinks @res */
		next = res->sibling;
		remove_resource(res);

		/* @victim may be NULL if the window never made it public */
		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}
360
361 /**
362 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
363 * @cxl_res: A standalone resource tree where each CXL window is a sibling
364 *
365 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
366 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
368 * entry and require the window to be truncated or trimmed. Consider this
369 * situation:
370 *
371 * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
372 * |--------------- "System RAM" -------------|
373 *
 * ...where platform firmware has established a System RAM resource across 2
375 * windows, but has left some portion of window 1 for dynamic CXL region
376 * provisioning. In this case "Window 0" will span the entirety of the "System
377 * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
378 * of that "System RAM" resource.
379 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		/* Build the public copy of this window for iomem_resource */
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		/* May grow @new so conflicting entries (e.g. System RAM) become children */
		insert_resource_expand_to_fit(&iomem_resource, new);

		/*
		 * An expanded @new may now overlap later sibling windows:
		 * drop fully-contained ones, truncate partial overlaps to
		 * start just past @new.
		 */
		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}
416
pair_cxl_resource(struct device * dev,void * data)417 static int pair_cxl_resource(struct device *dev, void *data)
418 {
419 struct resource *cxl_res = data;
420 struct resource *p;
421
422 if (!is_root_decoder(dev))
423 return 0;
424
425 for (p = cxl_res->child; p; p = p->sibling) {
426 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
427 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
428 struct resource res = {
429 .start = cxld->hpa_range.start,
430 .end = cxld->hpa_range.end,
431 .flags = IORESOURCE_MEM,
432 };
433
434 if (resource_contains(p, &res)) {
435 cxlrd->res = cxl_get_public_resource(p);
436 break;
437 }
438 }
439
440 return 0;
441 }
442
/*
 * Bind the ACPI0017 device: create the CXL root port, enumerate host
 * bridges as dports, reflect CFMWS windows in iomem_resource, pair root
 * decoders with their windows, enumerate host bridges as uports, and
 * finally register pmem support and rescan the bus.
 */
static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	/* Give the root device its own lock class; undone on unbind */
	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	/* Private tree spanning all of address space; windows become children */
	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(root_port))
		return PTR_ERR(root_port);
	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

	/* Host bridges as dports must exist before CFMWS decoders reference them */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	/* Registered before parsing so partially-parsed windows get torn down */
	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	/* Publish the collected windows into iomem_resource */
	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	/*
	 * If CONFIG_CXL_PMEM is disabled, @rc retains the non-negative
	 * value checked just above, so the following test is benign.
	 */
	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI re-trigger memdev attach */
	return cxl_bus_rescan();
}
517
/* Bind to the ACPI0017 CXL root object */
static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

/*
 * Also match a bare "cxl_acpi" platform device — presumably for a test
 * harness that emulates the ACPI topology (NOTE(review): confirm caller)
 */
static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);
543