// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

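/*
 * Index of a vmd_irq_list within vmd->irqs.  This is also the VMD MSI-X
 * table entry the list demuxes from, and is used as the destination ID
 * when composing child MSI messages (see vmd_compose_msi_msg()).
 */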
static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

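/*
 * Allocate the per-interrupt bookkeeping for a child MSI-X entry, assign
 * it to one of the VMD's vectors via vmd_next_irq(), and associate the
 * child's virq with the Linux IRQ of that VMD vector.
 */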
static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

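/*
 * Tear down a child interrupt: wait for any in-flight demux on the shared
 * vector (SRCU) before the vmd_irq can be freed, then drop the vector's
 * share count.
 */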
static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

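/*
 * Cap the number of vectors a child device may allocate at the number of
 * VMD MSI-X vectors available for demuxing.
 */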
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own.  DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return get_dma_ops(to_vmd_dev(dev));
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

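/*
 * Mirror the DMA operations of the VMD device itself, copying only the
 * callbacks the underlying implementation provides, and register them as
 * a dma_domain keyed by the VMD's PCI domain number so child devices pick
 * them up.
 */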
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

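/*
 * Map a bus/devfn/reg tuple to an offset within CFGBAR using the standard
 * ECAM layout (bus << 20 | devfn << 12 | reg), returning NULL if the
 * access would fall outside the BAR.
 */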
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

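/*
 * Link the derived root bus windows as children of the VMD MEMBARs so the
 * resource tree reflects that they live inside the VMD device's BARs.
 */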
static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number.  Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

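/*
 * Set up the VMD-owned PCI domain: derive the root bus windows from the
 * VMD BARs (applying shadow offsets and bus restrictions where the
 * hardware supports them), allocate a PCI domain number and an MSI IRQ
 * domain, then create and scan the root bus behind the VMD.
 */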
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000, busn_start = 0;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = 0x2018;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
						readq(membar2 + 0x2008);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
						readq(membar2 + 0x2010);
			pci_iounmap(vmd->dev, membar2);
		}
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127 or 128-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u32 vmcap, vmconfig;

		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
		if (BUS_RESTRICT_CAP(vmcap) &&
		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
			busn_start = 128;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = busn_start,
		.end   = busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource.  To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources.  We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	irq_domain_free_fwnode(fn);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
				       sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

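/*
 * VMD vector interrupt handler: walk the list of child IRQs sharing this
 * vector under SRCU and invoke each one's handler.
 */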
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
					PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	vmd_detach_resources(vmd);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM_SLEEP
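/*
 * The demux IRQs are released across suspend and re-requested on resume;
 * config and MSI-X state are handled via pci_save_state()/pci_restore_state().
 */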
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");