/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
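
/*
 * Usage sketch (assumes this file builds as the "megaraid_mm" module and
 * that the con_log() macro in the megaraid headers filters its messages
 * against dbglevel):
 *
 *	modprobe megaraid_mm dlevel=<n>
 *
 * where a larger <n> makes the con_log() output below more verbose.
 */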

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open	= mraid_mm_open,
	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mraid_mm_compat_ioctl,
#endif
	.owner	= THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};
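
/*
 * Illustrative user-space flow (a sketch only; mimd_t, MEGAIOC_MAGIC and
 * the MEGAIOC_* sub-opcodes come from the management ioctl headers, and the
 * exact ioctl command encoding is defined there, not here):
 *
 *	fd = open("/dev/megadev0", O_RDONLY);	// gated by CAP_SYS_ADMIN
 *	memset(&mimd, 0, sizeof(mimd));
 *	mimd.ui.fcs.opcode	= 0x82;			// driver-level command
 *	mimd.ui.fcs.subopcode	= MEGAIOC_QNADAP;	// how many adapters?
 *	mimd.data		= &count;		// uint32_t result buffer
 *	ioctl(fd, cmd, &mimd);				// cmd uses MEGAIOC_MAGIC
 *
 * mraid_mm_ioctl() below detects the old MIMD format by the absence of the
 * EXT_IOCTL_SIGN signature; driver-level requests (opcode 0x82) are served
 * locally by handle_drvrcmd(), everything else is converted to a uioc_t and
 * handed to the registered low level driver.
 */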

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD applications may still fire a different command.
	 */

	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the free list is empty for the adapter
	 * (mraid_mmadp_t) passed to mraid_mm_alloc_kioc
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

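/**
 * mraid_mm_unlocked_ioctl - serialized ioctl entry point
 * @filep	: file operations pointer
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 *
 * Takes mraid_mm_mutex around mraid_mm_ioctl() so that only one ioctl from
 * this interface is processed at a time.
 */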
static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl doesn't take this mutex */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;


	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t		__user *umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}


/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

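	/*
	 * Opcode 0x82 is an adapter-level query (only MEGAIOC_QADAPINFO is
	 * accepted here); 0x81 is a mailbox command whose data buffer and
	 * length come from ui.fcs.buffer/ui.fcs.length; 0x80 is a mailbox
	 * command that uses mimd.data with inlen/outlen instead.
	 */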
	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in above case, the beginning for memblk is treated as
	 * a mailbox. The passthru will begin at next 1K boundary. And the
	 * data will start 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
				pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Allocate a
	 * fresh one from the dma pool; this happens under the pool lock with
	 * GFP_ATOMIC, so it does not sleep.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			dma_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct uioc_timeout	timeout = { };

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires	= jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout	- callback from the expired timer
 * @t		: timer that timed out
 */
static void
lld_timedout(struct timer_list *t)
{
	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
	uioc_t *kioc = timeout->uioc;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}


/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}


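/*
 * How a low level driver hooks into this module (a sketch only, based on
 * the fields mraid_mm_register_adp() copies out of the adapter object
 * below; names in angle brackets are the LLD's own):
 *
 *	mraid_mmadp_t adp;
 *
 *	memset(&adp, 0, sizeof(adp));
 *	adp.unique_id	= <adapter unique id>;
 *	adp.drvr_type	= DRVRTYPE_MBOX;	// only mailbox LLDs accepted
 *	adp.drvr_data	= <LLD private handle>;
 *	adp.pdev	= <struct pci_dev of the controller>;
 *	adp.issue_uioc	= <LLD callback that queues a uioc_t>;
 *	adp.timeout	= <ioctl timeout in seconds>;
 *	adp.max_kioc	= <max concurrent ioctls>;
 *
 *	mraid_mm_register_adp(&adp);
 *
 * The issue_uioc() callback must eventually set kioc->status and call
 * kioc->done(kioc); otherwise lld_ioctl() waits until the timer fires, and
 * a timed out ioctl marks the controller offline (adp->quiescent = 0) so it
 * stops accepting new ioctls.
 */
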
/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	uint32_t	rval;
	int		i;


	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;


	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list	= kmalloc_array(lld_adp->max_kioc,
						sizeof(uioc_t),
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc_array(lld_adp->max_kioc,
						sizeof(mbox64_t),
						GFP_KERNEL);
	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
						&adapter->pdev->dev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list	= (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

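		/*
		 * Each kioc gets one mbox64_t out of the contiguous
		 * mbox_list array and a DMA-able passthru frame from the
		 * pthru pool; both stay attached to the kioc until the
		 * adapter is unregistered.
		 */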
		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	if (adapter->pthru_dma_pool)
		dma_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}


/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}


/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers for each adapter. Each pool has one
 * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
 * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
 * don't want to waste too much memory by allocating more buffers per pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = dma_pool_create("megaraid mm data buffer",
						&adp->pdev->dev, bufsize,
						16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {


		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	dma_pool_destroy(adp->pthru_dma_pool);


	return;
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				dma_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			dma_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init	- Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit	- Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */