// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4 bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

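/*
 * Report the number of free label slots, holding one slot in reserve
 * and clamping the count to zero if the (unsigned) value would
 * otherwise underflow.
 */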
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static ssize_t security_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * For the test version we need to poll the "hardware" in order
	 * to get the updated status for unlock testing.
	 */
	if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(const struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	if (test_bit(NDD_REGISTER_SYNC, &flags))
		nd_device_register_sync(dev);
	else
		nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

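/*
 * Cache the sysfs dirent for the 'security' attribute so that security
 * state changes (e.g. overwrite completion) can be signalled to
 * userspace; the devm action drops the reference on driver detach.
 */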
int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

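/*
 * The region alignment is the allocation granularity for the entire
 * interleave set; divide it across the mappings to arrive at the
 * per-DIMM DPA allocation granularity.
 */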
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
				      struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
					   "misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}