/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

struct jr_driver_data {
	/* List of Physical JobRs with the Driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
	kfree(jrp->entinfo);

	return ret;
}

static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	/*
	 * Return -EBUSY if the job ring is still allocated to a caller.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Remove the node from the Physical JobR list maintained by the driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
	irq_dispose_mapping(jrpriv->irq);

	return ret;
}

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If JobR error, we got more development work to do.
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
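/*
 * Completion-handling note (illustrative scenario): jobs may finish out
 * of order. If entries at sw_idx 0 and 1 are both in flight and job 1
 * completes first, its desc_addr_dma is zeroed but the software tail
 * stays at 0; when job 0 later completes, the tail-advance do/while loop
 * below moves the tail past both finished entries in a single pass.
 */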
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;

	while (rd_reg32(&jrp->rregs->outring_used)) {

		head = READ_ONCE(jrp->head);

		spin_lock(&jrp->outlock);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jrp->outring[hw_idx].desc ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		spin_unlock(&jrp->outlock);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
	}

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}

/**
 * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
 *
 * Returns: pointer to the newly allocated physical JobR device if
 *	    successful, or an ERR_PTR() otherwise.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt	= INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);

/**
 * caam_jr_free() - Free the Job Ring
 * @rdev: points to the dev that identifies the Job ring to
 *	  be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
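
/*
 * Illustrative usage sketch (not compiled into the driver): a consumer
 * typically grabs the least-loaded ring once, keeps it for the lifetime
 * of a transform, and releases it when done. The my_ctx type and both
 * helpers below are hypothetical.
 */
#if 0
struct my_ctx {
	struct device *jrdev;
};

static int my_ctx_init(struct my_ctx *ctx)
{
	ctx->jrdev = caam_jr_alloc();	/* may return ERR_PTR(-ENODEV) */
	return IS_ERR(ctx->jrdev) ? PTR_ERR(ctx->jrdev) : 0;
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	caam_jr_free(ctx->jrdev);	/* drop the tfm_count reference */
}
#endif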

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_alloc().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 status, void *areq)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 "desc" passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file.
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring.
	 */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
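
/*
 * Illustrative submission sketch (not compiled into the driver): submit
 * a pre-built descriptor and sleep until its completion callback fires.
 * The my_result struct and the my_done()/my_submit() helpers are
 * hypothetical; the descriptor and everything it references must live
 * in DMA-able memory, per the kernel-doc above.
 */
#if 0
struct my_result {
	struct completion done;
	u32 status;
};

static void my_done(struct device *jrdev, u32 *desc, u32 status, void *areq)
{
	struct my_result *res = areq;

	res->status = status;	/* raw CAAM status; see JRSTA definitions */
	complete(&res->done);
}

static int my_submit(struct device *jrdev, u32 *desc)
{
	struct my_result res;
	int ret;

	init_completion(&res.done);

	ret = caam_jr_enqueue(jrdev, desc, my_done, &res);
	if (ret)
		return ret;	/* -EBUSY (ring full) or -EIO (map failed) */

	wait_for_completion(&res.done);
	return res.status ? -EIO : 0;
}
#endif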

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		goto out_kill_deq;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		goto out_free_irq;

	error = -ENOMEM;
	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
	if (!jrp->inpring)
		goto out_free_irq;

	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
	if (!jrp->outring)
		goto out_free_inpring;

	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
	if (!jrp->entinfo)
		goto out_free_outring;

	/* initialize to "not done" - a zero desc_addr_dma marks a completed job */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	return 0;

out_free_outring:
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
out_free_inpring:
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
out_free_irq:
	free_irq(jrp->irq, dev);
out_kill_deq:
	tasklet_kill(&jrp->irqtask);
	return error;
}


/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (!ctrl) {
		dev_err(jrdev, "of_iomap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (caam_dpaa2)
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(49));
		else if (of_device_is_compatible(nprop,
						 "fsl,sec-v5.0-job-ring"))
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(40));
		else
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(36));
	} else {
		error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
	}
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		iounmap(ctrl);
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		irq_dispose_mapping(jrpriv->irq);
		iounmap(ctrl);
		return error;
	}

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	return 0;
}

static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);
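
/*
 * Illustrative device-tree sketch (hypothetical addresses and interrupt
 * specifier): a job ring node this driver would bind to, typically a
 * child of the SEC/crypto node.
 *
 *	jr@1000 {
 *		compatible = "fsl,sec-v4.0-job-ring";
 *		reg = <0x1000 0x1000>;
 *		interrupts = <88 2 0 0>;
 *	};
 */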

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe       = caam_jr_probe,
	.remove      = caam_jr_remove,
};

static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");