/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static bool rtas_hp_event;

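/*
 * Return the size of a hotpluggable memory block. This is read from the
 * ibm,lmb-size property of /ibm,dynamic-reconfiguration-memory when it
 * exists; otherwise, on pseries, it is taken from the size of the memory
 * node that starts where /memory@0 ends, falling back to
 * MIN_MEMORY_BLOCK_SIZE.
 */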
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

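/*
 * Find the index of the entry in ibm,associativity-lookup-arrays that
 * matches this LMB's associativity. If no entry matches, the property is
 * grown by one array and the new index is returned; -1 is returned if
 * the larger property cannot be allocated.
 */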
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

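/*
 * Configure the LMB's device tree node from its DRC index, read its
 * ibm,associativity property and translate it into an index into the
 * ibm,associativity-lookup-arrays property of the dynamic
 * reconfiguration memory node.
 */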
static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

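/*
 * Mark the LMB assigned in the device tree: set DRCONF_MEM_ASSIGNED,
 * record its associativity lookup array index and write the updated
 * dynamic memory property back with drmem_update_dt().
 */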
static int dlpar_add_device_tree_lmb(struct drmem_lmb *lmb)
{
	int rc, aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;

	rtas_hp_event = true;
	rc = drmem_update_dt();
	rtas_hp_event = false;

	return rc;
}

static int dlpar_remove_device_tree_lmb(struct drmem_lmb *lmb)
{
	int rc;

	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;

	rtas_hp_event = true;
	rc = drmem_update_dt();
	rtas_hp_event = false;

	return rc;
}

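/*
 * Map an LMB's base address to the memory_block device containing it,
 * going through its memory section number. The caller must drop the
 * device reference with put_device() when done.
 */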
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

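/*
 * Find the LMB with the given DRC index and return it along with the
 * LMB n_lmbs - 1 entries later, verifying that the range does not run
 * past the end of the drmem LMB array.
 */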
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *last_lmb;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs - 1];

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	if (end > last_lmb)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

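/*
 * Online or offline the memory block backing an LMB through the driver
 * core, doing nothing if the block is already in the requested state.
 */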
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

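/*
 * Remove a memblock_size-sized range of memory starting at base, one
 * memory section at a time, and drop it from the memblock regions.
 */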
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

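/*
 * An LMB can be hot-removed only if it is currently assigned, does not
 * fall within the fadump boot memory area, and every present memory
 * section it contains is reported removable by the core mm.
 */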
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/* Don't hot-remove memory that falls in fadump boot memory area */
	if (is_fadump_boot_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;

		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct drmem_lmb *);

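/*
 * Hot-remove a single LMB: offline its memory block, remove the memory
 * from the kernel and the memblock regions, then mark the LMB as
 * unassigned in the device tree.
 */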
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}

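/*
 * Hot-remove lmbs_to_remove LMBs chosen from anywhere in the drmem
 * array. Removed LMBs are temporarily marked reserved so they can be
 * added back if the full request cannot be satisfied; on success their
 * DRCs are released to firmware.
 */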
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmb->base_addr);

	return rc;
}

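/*
 * Hot-remove a contiguous range of lmbs_to_remove LMBs starting at the
 * LMB with the given DRC index. If any LMB in the range fails to be
 * removed, the LMBs removed so far are added back and the request fails.
 */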
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

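/*
 * Hot-add a single LMB: update the device tree, add the memory to the
 * kernel and online its memory block. Each step is rolled back if a
 * later one fails, leaving the LMB unassigned.
 */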
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		remove_memory(nid, lmb->base_addr, block_sz);
		dlpar_remove_device_tree_lmb(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

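/*
 * Hot-add lmbs_to_add currently unassigned LMBs. Added LMBs are
 * temporarily marked reserved so they can be removed again if the full
 * request cannot be satisfied.
 */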
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

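/*
 * Hot-add a contiguous range of lmbs_to_add LMBs starting at the LMB
 * with the given DRC index. If any LMB in the range fails to be added,
 * the LMBs added so far are removed again and the request fails.
 */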
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

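/*
 * Entry point for memory DLPAR requests delivered as RTAS hotplug error
 * logs: dispatch add, remove and re-add requests by count, DRC index or
 * indexed-count while holding the device hotplug lock.
 */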
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

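/*
 * Handle an update of the v1 ibm,dynamic-memory property that did not
 * originate from an RTAS hotplug event: compare the old and new LMB
 * flags and add or remove the corresponding memblock region.
 */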
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of LMBs described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);