1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2003-2004 LSI Logic Corporation.
7 *
8 * FILE : megaraid_mm.c
9 * Version : v2.20.2.7 (Jul 16 2006)
10 *
11 * Common management module
12 */
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/mutex.h>
16 #include "megaraid_mm.h"
17
18
19 // Entry points for char node driver
20 static DEFINE_MUTEX(mraid_mm_mutex);
21 static int mraid_mm_open(struct inode *, struct file *);
22 static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
23
24
// routines to convert to and from the old format
26 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
27 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
28
29
30 // Helper functions
31 static int handle_drvrcmd(void __user *, uint8_t, int *);
32 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
33 static void ioctl_done(uioc_t *);
34 static void lld_timedout(struct timer_list *);
35 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
36 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
37 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
38 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
39 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
40 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
41 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
42 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
43
44 MODULE_AUTHOR("LSI Logic Corporation");
45 MODULE_DESCRIPTION("LSI Logic Management Module");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
48
49 static int dbglevel = CL_ANN;
50 module_param_named(dlevel, dbglevel, int, 0);
51 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
52
53 EXPORT_SYMBOL(mraid_mm_register_adp);
54 EXPORT_SYMBOL(mraid_mm_unregister_adp);
55 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
56
57 static uint32_t drvr_ver = 0x02200207;
58
59 static int adapters_count_g;
60 static struct list_head adapters_list_g;
61
62 static wait_queue_head_t wait_q;
63
64 static const struct file_operations lsi_fops = {
65 .open = mraid_mm_open,
66 .unlocked_ioctl = mraid_mm_unlocked_ioctl,
67 .compat_ioctl = compat_ptr_ioctl,
68 .owner = THIS_MODULE,
69 .llseek = noop_llseek,
70 };
71
72 static struct miscdevice megaraid_mm_dev = {
73 .minor = MISC_DYNAMIC_MINOR,
74 .name = "megadev0",
75 .fops = &lsi_fops,
76 };
77
78 /**
79 * mraid_mm_open - open routine for char node interface
80 * @inode : unused
81 * @filep : unused
82 *
83 * Allow ioctl operations by apps only if they have superuser privilege.
84 */
85 static int
mraid_mm_open(struct inode * inode,struct file * filep)86 mraid_mm_open(struct inode *inode, struct file *filep)
87 {
88 /*
89 * Only allow superuser to access private ioctl interface
90 */
91 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
92
93 return 0;
94 }
95
/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 *
 * Validates the packet, resolves the target adapter and, for firmware
 * commands, allocates a kioc, hands it to the low level driver and copies
 * the results back to user space. Runs under mraid_mm_mutex (taken in
 * mraid_mm_unlocked_ioctl).
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD application would still fire different command.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl )
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n" ));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the list head is empty for the pointer
	 * of type mraid_mmapt passed to mraid_mm_alloc_kioc
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}
213
214 static long
mraid_mm_unlocked_ioctl(struct file * filep,unsigned int cmd,unsigned long arg)215 mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
216 unsigned long arg)
217 {
218 int err;
219
220 mutex_lock(&mraid_mm_mutex);
221 err = mraid_mm_ioctl(filep, cmd, arg);
222 mutex_unlock(&mraid_mm_mutex);
223
224 return err;
225 }
226
227 /**
228 * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
229 * @umimd : User space mimd_t ioctl packet
230 * @rval : returned success/error status
231 *
232 * The function return value is a pointer to the located @adapter.
233 */
234 static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user * umimd,int * rval)235 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
236 {
237 mraid_mmadp_t *adapter;
238 mimd_t mimd;
239 uint32_t adapno;
240 int iterator;
241
242
243 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
244 *rval = -EFAULT;
245 return NULL;
246 }
247
248 adapno = GETADAP(mimd.ui.fcs.adapno);
249
250 if (adapno >= adapters_count_g) {
251 *rval = -ENODEV;
252 return NULL;
253 }
254
255 adapter = NULL;
256 iterator = 0;
257
258 list_for_each_entry(adapter, &adapters_list_g, list) {
259 if (iterator++ == adapno) break;
260 }
261
262 if (!adapter) {
263 *rval = -ENODEV;
264 return NULL;
265 }
266
267 return adapter;
268 }
269
270 /**
271 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
272 * @arg : packet sent by the user app
273 * @old_ioctl : mimd if 1; uioc otherwise
274 * @rval : pointer for command's returned value (not function status)
275 */
276 static int
handle_drvrcmd(void __user * arg,uint8_t old_ioctl,int * rval)277 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
278 {
279 mimd_t __user *umimd;
280 mimd_t kmimd;
281 uint8_t opcode;
282 uint8_t subopcode;
283
284 if (old_ioctl)
285 goto old_packet;
286 else
287 goto new_packet;
288
289 new_packet:
290 return (-ENOTSUPP);
291
292 old_packet:
293 *rval = 0;
294 umimd = arg;
295
296 if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
297 return (-EFAULT);
298
299 opcode = kmimd.ui.fcs.opcode;
300 subopcode = kmimd.ui.fcs.subopcode;
301
302 /*
303 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
304 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
305 * indicate that we cannot handle this.
306 */
307 if (opcode != 0x82)
308 return 1;
309
310 switch (subopcode) {
311
312 case MEGAIOC_QDRVRVER:
313
314 if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
315 return (-EFAULT);
316
317 return 0;
318
319 case MEGAIOC_QNADAP:
320
321 *rval = adapters_count_g;
322
323 if (copy_to_user(kmimd.data, &adapters_count_g,
324 sizeof(uint32_t)))
325 return (-EFAULT);
326
327 return 0;
328
329 default:
330 /* cannot handle */
331 return 1;
332 }
333
334 return 0;
335 }
336
337
/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp	: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:	/* driver command: only adapter-info query supported */

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:	/* mailbox cmd; buffer described by fcs fields */

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		/* outlen => app reads; inlen => app writes; may be both */
		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:	/* mailbox cmd; buffer is mimd.data itself */

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);	/* 14-byte legacy mailbox portion */

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		/* Redirect data transfer to our dma buffer */
		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in above case, the beginning for memblk is treated as
	 * a mailbox. The passthru will begin at next 1K boundary. And the
	 * data will start 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	/* Point the pthru at our dma buffer, never at a user address */
	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		/* Reject user-supplied lengths larger than the dma buffer */
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}
491
/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp	: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		/* Skip pools whose buffer is too small for this request */
		if (xferlen > pool->buf_size)
			continue;

		/* Remember the smallest pool that could satisfy the request */
		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer from the best-fit pool (GFP_ATOMIC,
	 * since we hold the pool spinlock, so this will not sleep).
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	/* free_buf marks this buffer for dma_pool_free at dealloc time */
	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}
574
/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head*	head;
	unsigned long		flags;

	/* Semaphore counts free kiocs; blocks here until one is available */
	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	/*
	 * Should not happen: the semaphore count is kept in sync with the
	 * pool size, so the list cannot be empty after down(). Handled
	 * defensively anyway.
	 */
	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* Scrub the mailbox and passthru areas left over from last use */
	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	/* Reset per-command state before handing the kioc out */
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}
623
624 /**
625 * mraid_mm_dealloc_kioc - Return kioc to free pool
626 * @adp : Adapter softstate
627 * @kioc : uioc_t node to be returned to free pool
628 */
629 static void
mraid_mm_dealloc_kioc(mraid_mmadp_t * adp,uioc_t * kioc)630 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
631 {
632 mm_dmapool_t *pool;
633 unsigned long flags;
634
635 if (kioc->pool_index != -1) {
636 pool = &adp->dma_pool_list[kioc->pool_index];
637
638 /* This routine may be called in non-isr context also */
639 spin_lock_irqsave(&pool->lock, flags);
640
641 /*
642 * While attaching the dma buffer, if we didn't get the
643 * required buffer from the pool, we would have allocated
644 * it at the run time and set the free_buf flag. We must
645 * free that buffer. Otherwise, just mark that the buffer is
646 * not in use
647 */
648 if (kioc->free_buf == 1)
649 dma_pool_free(pool->handle, kioc->buf_vaddr,
650 kioc->buf_paddr);
651 else
652 pool->in_use = 0;
653
654 spin_unlock_irqrestore(&pool->lock, flags);
655 }
656
657 /* Return the kioc to the free pool */
658 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
659 list_add(&kioc->list, &adp->kioc_pool);
660 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
661
662 /* increment the free kioc count */
663 up(&adp->kioc_semaphore);
664
665 return;
666 }
667
/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp	: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 *
 * Issues the kioc to the low level driver and sleeps until it completes
 * (see ioctl_done()) or the optional per-adapter timeout expires (see
 * lld_timedout()). Returns the issue error or the kioc's final status.
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct uioc_timeout	timeout = { };

	/* Sentinel status; the lld must overwrite it on completion */
	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires	= jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));

	/* Timer was armed above iff .function is set; cancel it before
	 * this stack frame (which owns the timer) goes away */
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}
716
717
718 /**
719 * ioctl_done - callback from the low level driver
720 * @kioc : completed ioctl packet
721 */
722 static void
ioctl_done(uioc_t * kioc)723 ioctl_done(uioc_t *kioc)
724 {
725 uint32_t adapno;
726 int iterator;
727 mraid_mmadp_t* adapter;
728
729 /*
730 * When the kioc returns from driver, make sure it still doesn't
731 * have ENODATA in status. Otherwise, driver will hang on wait_event
732 * forever
733 */
734 if (kioc->status == -ENODATA) {
735 con_log(CL_ANN, (KERN_WARNING
736 "megaraid cmm: lld didn't change status!\n"));
737
738 kioc->status = -EINVAL;
739 }
740
741 /*
742 * Check if this kioc was timedout before. If so, nobody is waiting
743 * on this kioc. We don't have to wake up anybody. Instead, we just
744 * have to free the kioc
745 */
746 if (kioc->timedout) {
747 iterator = 0;
748 adapter = NULL;
749 adapno = kioc->adapno;
750
751 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
752 "ioctl that was timedout before\n"));
753
754 list_for_each_entry(adapter, &adapters_list_g, list) {
755 if (iterator++ == adapno) break;
756 }
757
758 kioc->timedout = 0;
759
760 if (adapter) {
761 mraid_mm_dealloc_kioc( adapter, kioc );
762 }
763 }
764 else {
765 wake_up(&wait_q);
766 }
767 }
768
769
/**
 * lld_timedout	- callback from the expired timer
 * @t	: timer that timed out
 *
 * Runs in timer context. Marks the kioc as timed out with status -ETIME
 * and wakes the thread sleeping in lld_ioctl(); the low level driver
 * still owns the kioc's resources at this point (see ioctl_done()).
 */
static void
lld_timedout(struct timer_list *t)
{
	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
	uioc_t *kioc	= timeout->uioc;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}
787
788
/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 *
 * Copies the results of a completed kioc (adapter info, passthru scsi
 * status, data buffer contents and mailbox status byte) back into the
 * user's mimd packet.
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/* Driver command: only the adapter-info query returns data */
	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			/* Convert new-format hba info to the legacy layout */
			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	/* Passthru command: reflect the scsi status back to the user copy */
	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	/* Copy any returned data back to the application's buffer */
	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	/* Update the status byte (offset 17) in the user's mailbox image */
	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}
864
865
866 /**
867 * hinfo_to_cinfo - Convert new format hba info into old format
868 * @hinfo : New format, more comprehensive adapter info
869 * @cinfo : Old format adapter info to support mimd_t apps
870 */
871 static void
hinfo_to_cinfo(mraid_hba_info_t * hinfo,mcontroller_t * cinfo)872 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
873 {
874 if (!hinfo || !cinfo)
875 return;
876
877 cinfo->base = hinfo->baseport;
878 cinfo->irq = hinfo->irq;
879 cinfo->numldrv = hinfo->num_ldrv;
880 cinfo->pcibus = hinfo->pci_bus;
881 cinfo->pcidev = hinfo->pci_slot;
882 cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
883 cinfo->pciid = hinfo->pci_device_id;
884 cinfo->pcivendor = hinfo->pci_vendor_id;
885 cinfo->pcislot = hinfo->pci_slot;
886 cinfo->uid = hinfo->unique_id;
887 }
888
889
890 /**
891 * mraid_mm_register_adp - Registration routine for low level drivers
892 * @lld_adp : Adapter object
893 */
894 int
mraid_mm_register_adp(mraid_mmadp_t * lld_adp)895 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
896 {
897 mraid_mmadp_t *adapter;
898 mbox64_t *mbox_list;
899 uioc_t *kioc;
900 uint32_t rval;
901 int i;
902
903
904 if (lld_adp->drvr_type != DRVRTYPE_MBOX)
905 return (-EINVAL);
906
907 adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
908
909 if (!adapter)
910 return -ENOMEM;
911
912
913 adapter->unique_id = lld_adp->unique_id;
914 adapter->drvr_type = lld_adp->drvr_type;
915 adapter->drvr_data = lld_adp->drvr_data;
916 adapter->pdev = lld_adp->pdev;
917 adapter->issue_uioc = lld_adp->issue_uioc;
918 adapter->timeout = lld_adp->timeout;
919 adapter->max_kioc = lld_adp->max_kioc;
920 adapter->quiescent = 1;
921
922 /*
923 * Allocate single blocks of memory for all required kiocs,
924 * mailboxes and passthru structures.
925 */
926 adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
927 sizeof(uioc_t),
928 GFP_KERNEL);
929 adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
930 sizeof(mbox64_t),
931 GFP_KERNEL);
932 adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
933 &adapter->pdev->dev,
934 sizeof(mraid_passthru_t),
935 16, 0);
936
937 if (!adapter->kioc_list || !adapter->mbox_list ||
938 !adapter->pthru_dma_pool) {
939
940 con_log(CL_ANN, (KERN_WARNING
941 "megaraid cmm: out of memory, %s %d\n", __func__,
942 __LINE__));
943
944 rval = (-ENOMEM);
945
946 goto memalloc_error;
947 }
948
949 /*
950 * Slice kioc_list and make a kioc_pool with the individiual kiocs
951 */
952 INIT_LIST_HEAD(&adapter->kioc_pool);
953 spin_lock_init(&adapter->kioc_pool_lock);
954 sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
955
956 mbox_list = (mbox64_t *)adapter->mbox_list;
957
958 for (i = 0; i < lld_adp->max_kioc; i++) {
959
960 kioc = adapter->kioc_list + i;
961 kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
962 kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
963 GFP_KERNEL, &kioc->pthru32_h);
964
965 if (!kioc->pthru32) {
966
967 con_log(CL_ANN, (KERN_WARNING
968 "megaraid cmm: out of memory, %s %d\n",
969 __func__, __LINE__));
970
971 rval = (-ENOMEM);
972
973 goto pthru_dma_pool_error;
974 }
975
976 list_add_tail(&kioc->list, &adapter->kioc_pool);
977 }
978
979 // Setup the dma pools for data buffers
980 if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
981 goto dma_pool_error;
982 }
983
984 list_add_tail(&adapter->list, &adapters_list_g);
985
986 adapters_count_g++;
987
988 return 0;
989
990 dma_pool_error:
991 /* Do nothing */
992
993 pthru_dma_pool_error:
994
995 for (i = 0; i < lld_adp->max_kioc; i++) {
996 kioc = adapter->kioc_list + i;
997 if (kioc->pthru32) {
998 dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
999 kioc->pthru32_h);
1000 }
1001 }
1002
1003 memalloc_error:
1004
1005 kfree(adapter->kioc_list);
1006 kfree(adapter->mbox_list);
1007
1008 dma_pool_destroy(adapter->pthru_dma_pool);
1009
1010 kfree(adapter);
1011
1012 return rval;
1013 }
1014
1015
1016 /**
1017 * mraid_mm_adapter_app_handle - return the application handle for this adapter
1018 * @unique_id : adapter unique identifier
1019 *
1020 * For the given driver data, locate the adapter in our global list and
1021 * return the corresponding handle, which is also used by applications to
1022 * uniquely identify an adapter.
1023 *
1024 * Return adapter handle if found in the list.
1025 * Return 0 if adapter could not be located, should never happen though.
1026 */
1027 uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)1028 mraid_mm_adapter_app_handle(uint32_t unique_id)
1029 {
1030 mraid_mmadp_t *adapter;
1031 mraid_mmadp_t *tmp;
1032 int index = 0;
1033
1034 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1035
1036 if (adapter->unique_id == unique_id) {
1037
1038 return MKADAP(index);
1039 }
1040
1041 index++;
1042 }
1043
1044 return 0;
1045 }
1046
1047
1048 /**
1049 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1050 * @adp : Adapter softstate
1051 *
1052 * We maintain a pool of dma buffers per each adapter. Each pool has one
1053 * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
1054 * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
1055 * dont' want to waste too much memory by allocating more buffers per each
1056 * pool.
1057 */
1058 static int
mraid_mm_setup_dma_pools(mraid_mmadp_t * adp)1059 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1060 {
1061 mm_dmapool_t *pool;
1062 int bufsize;
1063 int i;
1064
1065 /*
1066 * Create MAX_DMA_POOLS number of pools
1067 */
1068 bufsize = MRAID_MM_INIT_BUFF_SIZE;
1069
1070 for (i = 0; i < MAX_DMA_POOLS; i++){
1071
1072 pool = &adp->dma_pool_list[i];
1073
1074 pool->buf_size = bufsize;
1075 spin_lock_init(&pool->lock);
1076
1077 pool->handle = dma_pool_create("megaraid mm data buffer",
1078 &adp->pdev->dev, bufsize,
1079 16, 0);
1080
1081 if (!pool->handle) {
1082 goto dma_pool_setup_error;
1083 }
1084
1085 pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
1086 &pool->paddr);
1087
1088 if (!pool->vaddr)
1089 goto dma_pool_setup_error;
1090
1091 bufsize = bufsize * 2;
1092 }
1093
1094 return 0;
1095
1096 dma_pool_setup_error:
1097
1098 mraid_mm_teardown_dma_pools(adp);
1099 return (-ENOMEM);
1100 }
1101
1102
1103 /**
1104 * mraid_mm_unregister_adp - Unregister routine for low level drivers
1105 * @unique_id : UID of the adpater
1106 *
1107 * Assumes no outstanding ioctls to llds.
1108 */
1109 int
mraid_mm_unregister_adp(uint32_t unique_id)1110 mraid_mm_unregister_adp(uint32_t unique_id)
1111 {
1112 mraid_mmadp_t *adapter;
1113 mraid_mmadp_t *tmp;
1114
1115 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1116
1117
1118 if (adapter->unique_id == unique_id) {
1119
1120 adapters_count_g--;
1121
1122 list_del_init(&adapter->list);
1123
1124 mraid_mm_free_adp_resources(adapter);
1125
1126 kfree(adapter);
1127
1128 con_log(CL_ANN, (
1129 "megaraid cmm: Unregistered one adapter:%#x\n",
1130 unique_id));
1131
1132 return 0;
1133 }
1134 }
1135
1136 return (-ENODEV);
1137 }
1138
1139 /**
1140 * mraid_mm_free_adp_resources - Free adapter softstate
1141 * @adp : Adapter softstate
1142 */
1143 static void
mraid_mm_free_adp_resources(mraid_mmadp_t * adp)1144 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1145 {
1146 uioc_t *kioc;
1147 int i;
1148
1149 mraid_mm_teardown_dma_pools(adp);
1150
1151 for (i = 0; i < adp->max_kioc; i++) {
1152
1153 kioc = adp->kioc_list + i;
1154
1155 dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1156 kioc->pthru32_h);
1157 }
1158
1159 kfree(adp->kioc_list);
1160 kfree(adp->mbox_list);
1161
1162 dma_pool_destroy(adp->pthru_dma_pool);
1163
1164
1165 return;
1166 }
1167
1168
1169 /**
1170 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1171 * @adp : Adapter softstate
1172 */
1173 static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t * adp)1174 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1175 {
1176 int i;
1177 mm_dmapool_t *pool;
1178
1179 for (i = 0; i < MAX_DMA_POOLS; i++) {
1180
1181 pool = &adp->dma_pool_list[i];
1182
1183 if (pool->handle) {
1184
1185 if (pool->vaddr)
1186 dma_pool_free(pool->handle, pool->vaddr,
1187 pool->paddr);
1188
1189 dma_pool_destroy(pool->handle);
1190 pool->handle = NULL;
1191 }
1192 }
1193
1194 return;
1195 }
1196
1197 /**
1198 * mraid_mm_init - Module entry point
1199 */
1200 static int __init
mraid_mm_init(void)1201 mraid_mm_init(void)
1202 {
1203 int err;
1204
1205 // Announce the driver version
1206 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1207 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1208
1209 err = misc_register(&megaraid_mm_dev);
1210 if (err < 0) {
1211 con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1212 return err;
1213 }
1214
1215 init_waitqueue_head(&wait_q);
1216
1217 INIT_LIST_HEAD(&adapters_list_g);
1218
1219 return 0;
1220 }
1221
1222
1223 /**
1224 * mraid_mm_exit - Module exit point
1225 */
1226 static void __exit
mraid_mm_exit(void)1227 mraid_mm_exit(void)
1228 {
1229 con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1230
1231 misc_deregister(&megaraid_mm_dev);
1232 }
1233
1234 module_init(mraid_mm_init);
1235 module_exit(mraid_mm_exit);
1236
1237 /* vi: set ts=8 sw=8 tw=78: */
1238