Lines Matching +full:aac +full:- +full:mode
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Adaptec AAC series RAID controller driver
9 * Copyright (c) 2000-2010 Adaptec, Inc.
10 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
11 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
42 * fib_map_alloc - allocate the fib objects
51 if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE) in fib_map_alloc()
52 dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; in fib_map_alloc()
54 dev->max_cmd_size = dev->max_fib_size; in fib_map_alloc()
55 if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) { in fib_map_alloc()
56 dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; in fib_map_alloc()
58 dev->max_cmd_size = dev->max_fib_size; in fib_map_alloc()
63 &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue, in fib_map_alloc()
64 AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); in fib_map_alloc()
65 dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev, in fib_map_alloc()
66 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) in fib_map_alloc()
67 * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), in fib_map_alloc()
68 &dev->hw_fib_pa, GFP_KERNEL); in fib_map_alloc()
69 if (dev->hw_fib_va == NULL) in fib_map_alloc()
70 return -ENOMEM; in fib_map_alloc()
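The dma_alloc_coherent() call above asks for ALIGN32 - 1 extra bytes so the driver can later round the returned physical address up to a 32-byte boundary (see the hw_fib_pa round-up in the aac_fib_setup() fragment below). A minimal userspace sketch of that arithmetic, with illustrative values rather than real DMA addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN32 32

/* Round a (simulated) physical address up to the next 32-byte
 * boundary, as done with hw_fib_pa in aac_fib_setup(). */
static uint64_t align32_up(uint64_t pa)
{
	return (pa + (ALIGN32 - 1)) & ~(uint64_t)(ALIGN32 - 1);
}

int main(void)
{
	uint64_t raw_pa = 0x1005;           /* deliberately misaligned */
	uint64_t aligned = align32_up(raw_pa);

	/* Because the buffer was over-allocated by ALIGN32 - 1 bytes,
	 * the aligned start plus the payload still fits inside it. */
	assert(aligned % ALIGN32 == 0 && aligned - raw_pa < ALIGN32);
	printf("raw=%#llx aligned=%#llx skipped=%llu bytes\n",
	       (unsigned long long)raw_pa,
	       (unsigned long long)aligned,
	       (unsigned long long)(aligned - raw_pa));
	return 0;
}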
75 * aac_fib_map_free - free the fib objects
88 if(!dev->hw_fib_va || !dev->max_cmd_size) in aac_fib_map_free()
91 num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; in aac_fib_map_free()
92 fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr); in aac_fib_map_free()
93 alloc_size = fib_size * num_fibs + ALIGN32 - 1; in aac_fib_map_free()
95 dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va, in aac_fib_map_free()
96 dev->hw_fib_pa); in aac_fib_map_free()
98 dev->hw_fib_va = NULL; in aac_fib_map_free()
99 dev->hw_fib_pa = 0; in aac_fib_map_free()
108 for (i = 0, fibptr = &dev->fibs[i]; in aac_fib_vector_assign()
109 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); in aac_fib_vector_assign()
111 if ((dev->max_msix == 1) || in aac_fib_vector_assign()
112 (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1) in aac_fib_vector_assign()
113 - dev->vector_cap))) { in aac_fib_vector_assign()
114 fibptr->vector_no = 0; in aac_fib_vector_assign()
116 fibptr->vector_no = vector; in aac_fib_vector_assign()
118 if (vector == dev->max_msix) in aac_fib_vector_assign()
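The loop above pins the tail of the fib array (the reserved management fibs) to vector 0 and deals the remaining fibs round-robin over the other MSI-X vectors; the fragment cuts off at line 118, so the wrap back to vector 1 is inferred. A standalone model with made-up sizes:

#include <stdio.h>

/* Model of aac_fib_vector_assign(): the tail of the fib array
 * (management fibs, here vector_cap entries) is pinned to vector 0,
 * everything else is dealt round-robin over vectors 1..max_msix-1.
 * All sizes are illustrative. */
int main(void)
{
	int can_queue = 8, mgt_fibs = 2, max_msix = 4;
	int total = can_queue + mgt_fibs;
	int vector_cap = mgt_fibs;      /* assumption for the model */
	int vector = 1;

	for (int i = 0; i < total; i++) {
		int vector_no;

		if (max_msix == 1 || i > (total - 1) - vector_cap) {
			vector_no = 0;
		} else {
			vector_no = vector++;
			if (vector == max_msix)
				vector = 1;     /* wrap, skipping vector 0 */
		}
		printf("fib %2d -> vector %d\n", i, vector_no);
	}
	return 0;
}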
125 * aac_fib_setup - setup the fibs
140 while (((i = fib_map_alloc(dev)) == -ENOMEM) in aac_fib_setup()
141 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { in aac_fib_setup()
142 max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1; in aac_fib_setup()
143 dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB; in aac_fib_setup()
144 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) in aac_fib_setup()
145 dev->init->r7.max_io_commands = cpu_to_le32(max_cmds); in aac_fib_setup()
148 return -ENOMEM; in aac_fib_setup()
150 memset(dev->hw_fib_va, 0, in aac_fib_setup()
151 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * in aac_fib_setup()
152 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); in aac_fib_setup()
155 hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); in aac_fib_setup()
156 hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + in aac_fib_setup()
157 (hw_fib_pa - dev->hw_fib_pa)); in aac_fib_setup()
167 for (i = 0, fibptr = &dev->fibs[i]; in aac_fib_setup()
168 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); in aac_fib_setup()
171 fibptr->flags = 0; in aac_fib_setup()
172 fibptr->size = sizeof(struct fib); in aac_fib_setup()
173 fibptr->dev = dev; in aac_fib_setup()
174 fibptr->hw_fib_va = hw_fib; in aac_fib_setup()
175 fibptr->data = (void *) fibptr->hw_fib_va->data; in aac_fib_setup()
176 fibptr->next = fibptr+1; /* Forward chain the fibs */ in aac_fib_setup()
177 init_completion(&fibptr->event_wait); in aac_fib_setup()
178 spin_lock_init(&fibptr->event_lock); in aac_fib_setup()
179 hw_fib->header.XferState = cpu_to_le32(0xffffffff); in aac_fib_setup()
180 hw_fib->header.SenderSize = in aac_fib_setup()
181 cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */ in aac_fib_setup()
182 fibptr->hw_fib_pa = hw_fib_pa; in aac_fib_setup()
183 fibptr->hw_sgl_pa = hw_fib_pa + in aac_fib_setup()
189 fibptr->hw_error_pa = hw_fib_pa + in aac_fib_setup()
193 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); in aac_fib_setup()
195 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr); in aac_fib_setup()
206 dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL; in aac_fib_setup()
210 dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue]; in aac_fib_setup()
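The while loop at the top of aac_fib_setup() is a geometric back-off: each time fib_map_alloc() fails with -ENOMEM, the host queue depth is roughly halved (always keeping AAC_NUM_MGT_FIB slots for management fibs) until the pool fits or the floor is reached. A self-contained model, where alloc_pool() is a stand-in for fib_map_alloc():

#include <stdio.h>

#define AAC_NUM_MGT_FIB 8

/* Pretend the pool only fits when 100 or fewer fibs are requested. */
static int alloc_pool(int cmds)
{
	return cmds > 100 ? -1 : 0;
}

int main(void)
{
	int can_queue = 512;

	while (alloc_pool(can_queue + AAC_NUM_MGT_FIB) != 0 &&
	       can_queue > (64 - AAC_NUM_MGT_FIB)) {
		int max_cmds = (can_queue + AAC_NUM_MGT_FIB) >> 1;

		can_queue = max_cmds - AAC_NUM_MGT_FIB;
		printf("retrying with can_queue=%d\n", can_queue);
	}
	printf("final can_queue=%d\n", can_queue);
	return 0;
}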
215 * aac_fib_alloc_tag - allocate a fib using tags

227 fibptr = &dev->fibs[scmd->request->tag]; in aac_fib_alloc_tag()
232 fibptr->hw_fib_va->header.XferState = 0; in aac_fib_alloc_tag()
233 fibptr->type = FSAFS_NTC_FIB_CONTEXT; in aac_fib_alloc_tag()
234 fibptr->callback_data = NULL; in aac_fib_alloc_tag()
235 fibptr->callback = NULL; in aac_fib_alloc_tag()
236 fibptr->flags = 0; in aac_fib_alloc_tag()
242 * aac_fib_alloc - allocate a fib
253 spin_lock_irqsave(&dev->fib_lock, flags); in aac_fib_alloc()
254 fibptr = dev->free_fib; in aac_fib_alloc()
256 spin_unlock_irqrestore(&dev->fib_lock, flags); in aac_fib_alloc()
259 dev->free_fib = fibptr->next; in aac_fib_alloc()
260 spin_unlock_irqrestore(&dev->fib_lock, flags); in aac_fib_alloc()
264 fibptr->type = FSAFS_NTC_FIB_CONTEXT; in aac_fib_alloc()
265 fibptr->size = sizeof(struct fib); in aac_fib_alloc()
270 fibptr->hw_fib_va->header.XferState = 0; in aac_fib_alloc()
271 fibptr->flags = 0; in aac_fib_alloc()
272 fibptr->callback = NULL; in aac_fib_alloc()
273 fibptr->callback_data = NULL; in aac_fib_alloc()
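aac_fib_alloc() is a classic locked free-list pop: take the head under fib_lock, advance free_fib, then initialise the fib outside the lock. A userspace sketch using a pthread mutex in place of the spinlock; struct fib here is a toy, not the driver structure:

#include <pthread.h>
#include <stdio.h>

struct fib {
	struct fib *next;
	int id;
};

static struct fib *free_fib;
static pthread_mutex_t fib_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop the free-list head under the lock; NULL means pool exhausted. */
static struct fib *fib_alloc(void)
{
	struct fib *f;

	pthread_mutex_lock(&fib_lock);
	f = free_fib;
	if (f)
		free_fib = f->next;
	pthread_mutex_unlock(&fib_lock);

	if (f)
		f->next = NULL;         /* detach before handing out */
	return f;
}

int main(void)
{
	struct fib pool[3], *f;

	for (int i = 0; i < 3; i++) {   /* forward-chain, as in aac_fib_setup() */
		pool[i].id = i;
		pool[i].next = (i < 2) ? &pool[i + 1] : NULL;
	}
	free_fib = &pool[0];

	while ((f = fib_alloc()) != NULL)
		printf("allocated fib %d\n", f->id);
	printf("pool empty\n");
	return 0;
}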
279 * aac_fib_free - free a fib
289 if (fibptr->done == 2) in aac_fib_free()
292 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); in aac_fib_free()
293 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) in aac_fib_free()
295 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && in aac_fib_free()
296 fibptr->hw_fib_va->header.XferState != 0) { in aac_fib_free()
299 le32_to_cpu(fibptr->hw_fib_va->header.XferState)); in aac_fib_free()
301 fibptr->next = fibptr->dev->free_fib; in aac_fib_free()
302 fibptr->dev->free_fib = fibptr; in aac_fib_free()
303 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); in aac_fib_free()
307 * aac_fib_init - initialise a fib
315 struct hw_fib *hw_fib = fibptr->hw_fib_va; in aac_fib_init()
317 memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr)); in aac_fib_init()
318 hw_fib->header.StructType = FIB_MAGIC; in aac_fib_init()
319 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); in aac_fib_init()
320 hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); in aac_fib_init()
321 hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); in aac_fib_init()
322 hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size); in aac_fib_init()
326 * fib_dealloc - deallocate a fib
335 struct hw_fib *hw_fib = fibptr->hw_fib_va; in fib_dealloc()
336 hw_fib->header.XferState = 0; in fib_dealloc()
347 * aac_get_entry - get a queue entry
371 q = &dev->queues->queue[qid]; in aac_get_entry()
373 idx = *index = le32_to_cpu(*(q->headers.producer)); in aac_get_entry()
375 if (idx != le32_to_cpu(*(q->headers.consumer))) { in aac_get_entry()
376 if (--idx == 0) { in aac_get_entry()
382 if (idx != le32_to_cpu(*(q->headers.consumer))) in aac_get_entry()
395 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { in aac_get_entry()
397 qid, atomic_read(&q->numpending)); in aac_get_entry()
400 *entry = q->base + *index; in aac_get_entry()
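aac_get_entry() hands out the slot at the producer index and treats the ring as full when the next slot would collide with the consumer index (the dprintk with numpending above fires in that case). A simplified model of that check; the exact wrap convention is an assumption made for illustration:

#include <stdio.h>

#define ENTRIES 8u

static unsigned int next_slot(unsigned int idx)
{
	return (idx + 1) % ENTRIES;
}

/* Full when advancing the producer would land on the consumer. */
static int queue_full(unsigned int producer, unsigned int consumer)
{
	return next_slot(producer) == consumer;
}

int main(void)
{
	unsigned int producer = 0, consumer = 0;
	int queued = 0;

	while (!queue_full(producer, consumer)) {   /* fill the ring */
		producer = next_slot(producer);
		queued++;
	}
	/* one slot is sacrificed to distinguish full from empty */
	printf("queued %d of %u slots\n", queued, ENTRIES);
	return 0;
}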
406 * aac_queue_get - get the next free QE
434 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); in aac_queue_get()
443 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); in aac_queue_get()
444 entry->addr = hw_fib->header.SenderFibAddress; in aac_queue_get()
446 hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter know where to find its data */ in aac_queue_get()
454 entry->addr = cpu_to_le32(fibptr->hw_fib_pa); in aac_queue_get()
467 * aac_fib_send - send a fib to the adapter
487 struct aac_dev * dev = fibptr->dev; in aac_fib_send()
488 struct hw_fib * hw_fib = fibptr->hw_fib_va; in aac_fib_send()
493 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) in aac_fib_send()
494 return -EBUSY; in aac_fib_send()
496 if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)) in aac_fib_send()
497 return -EINVAL; in aac_fib_send()
507 * will have a debug mode where the adapter can notify the host in aac_fib_send()
510 fibptr->flags = 0; in aac_fib_send()
512 return -EINVAL; in aac_fib_send()
514 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); in aac_fib_send()
517 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); in aac_fib_send()
520 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); in aac_fib_send()
527 hw_fib->header.SenderFibAddress = in aac_fib_send()
528 cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); in aac_fib_send()
533 hw_fib->header.Handle = in aac_fib_send()
534 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); in aac_fib_send()
543 hw_fib->header.Command = cpu_to_le16(command); in aac_fib_send()
544 hw_fib->header.XferState |= cpu_to_le32(SentFromHost); in aac_fib_send()
548 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); in aac_fib_send()
549 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) { in aac_fib_send()
550 return -EMSGSIZE; in aac_fib_send()
556 hw_fib->header.XferState |= cpu_to_le32(NormalPriority); in aac_fib_send()
563 fibptr->callback = callback; in aac_fib_send()
564 fibptr->callback_data = callback_data; in aac_fib_send()
565 fibptr->flags = FIB_CONTEXT_FLAG; in aac_fib_send()
568 fibptr->done = 0; in aac_fib_send()
573 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); in aac_fib_send()
574 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); in aac_fib_send()
575 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); in aac_fib_send()
576 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va)); in aac_fib_send()
577 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); in aac_fib_send()
580 if (!dev->queues) in aac_fib_send()
581 return -EBUSY; in aac_fib_send()
585 spin_lock_irqsave(&dev->manage_lock, mflags); in aac_fib_send()
586 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { in aac_fib_send()
588 dev->management_fib_count); in aac_fib_send()
589 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_fib_send()
590 return -EBUSY; in aac_fib_send()
592 dev->management_fib_count++; in aac_fib_send()
593 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_fib_send()
594 spin_lock_irqsave(&fibptr->event_lock, flags); in aac_fib_send()
597 if (dev->sync_mode) { in aac_fib_send()
599 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_fib_send()
600 spin_lock_irqsave(&dev->sync_lock, sflags); in aac_fib_send()
601 if (dev->sync_fib) { in aac_fib_send()
602 list_add_tail(&fibptr->fiblink, &dev->sync_fib_list); in aac_fib_send()
603 spin_unlock_irqrestore(&dev->sync_lock, sflags); in aac_fib_send()
605 dev->sync_fib = fibptr; in aac_fib_send()
606 spin_unlock_irqrestore(&dev->sync_lock, sflags); in aac_fib_send()
608 (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0, in aac_fib_send()
612 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; in aac_fib_send()
613 if (wait_for_completion_interruptible(&fibptr->event_wait)) { in aac_fib_send()
614 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT; in aac_fib_send()
615 return -EFAULT; in aac_fib_send()
619 return -EINPROGRESS; in aac_fib_send()
623 printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); in aac_fib_send()
625 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_fib_send()
626 spin_lock_irqsave(&dev->manage_lock, mflags); in aac_fib_send()
627 dev->management_fib_count--; in aac_fib_send()
628 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_fib_send()
630 return -EBUSY; in aac_fib_send()
639 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_fib_send()
649 while (!try_wait_for_completion(&fibptr->event_wait)) { in aac_fib_send()
652 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; in aac_fib_send()
653 atomic_dec(&q->numpending); in aac_fib_send()
654 if (wait == -1) { in aac_fib_send()
658 "the SAFE mode kernel options (acpi, apic etc)\n"); in aac_fib_send()
660 return -ETIMEDOUT; in aac_fib_send()
664 return -EFAULT; in aac_fib_send()
667 if (wait == -1) { in aac_fib_send()
672 return -EFAULT; in aac_fib_send()
679 } else if (wait_for_completion_interruptible(&fibptr->event_wait)) { in aac_fib_send()
684 spin_lock_irqsave(&fibptr->event_lock, flags); in aac_fib_send()
685 if (fibptr->done == 0) { in aac_fib_send()
686 fibptr->done = 2; /* Tell interrupt we aborted */ in aac_fib_send()
687 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_fib_send()
688 return -ERESTARTSYS; in aac_fib_send()
690 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_fib_send()
691 BUG_ON(fibptr->done == 0); in aac_fib_send()
693 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) in aac_fib_send()
694 return -ETIMEDOUT; in aac_fib_send()
702 return -EINPROGRESS; in aac_fib_send()
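For a blocking send, aac_fib_send() sleeps on fibptr->event_wait and, if the wait is interrupted, sets done = 2 under event_lock so the interrupt path knows the request was abandoned rather than completed. A userspace model of that handshake, with pthreads standing in for the kernel completion and spinlock primitives:

#include <pthread.h>
#include <stdio.h>

struct fib_model {
	pthread_mutex_t lock;
	pthread_cond_t  event;
	int done;               /* 0 = pending, 1 = completed, 2 = aborted */
};

/* Simulated interrupt handler: complete only if not already aborted. */
static void *irq_handler(void *arg)
{
	struct fib_model *fib = arg;

	pthread_mutex_lock(&fib->lock);
	if (fib->done == 0) {
		fib->done = 1;
		pthread_cond_signal(&fib->event);
	}
	pthread_mutex_unlock(&fib->lock);
	return NULL;
}

int main(void)
{
	struct fib_model fib = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t irq;

	pthread_create(&irq, NULL, irq_handler, &fib);

	pthread_mutex_lock(&fib.lock);
	while (fib.done == 0)           /* the blocking-send wait */
		pthread_cond_wait(&fib.event, &fib.lock);
	pthread_mutex_unlock(&fib.lock);

	printf("fib done=%d\n", fib.done);
	pthread_join(irq, NULL);
	return 0;
}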
710 struct aac_dev *dev = fibptr->dev; in aac_hba_send()
715 fibptr->hw_fib_va; in aac_hba_send()
717 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); in aac_hba_send()
720 fibptr->callback = callback; in aac_hba_send()
721 fibptr->callback_data = callback_data; in aac_hba_send()
726 hbacmd->iu_type = command; in aac_hba_send()
730 hbacmd->request_id = in aac_hba_send()
731 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); in aac_hba_send()
732 fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; in aac_hba_send()
734 return -EINVAL; in aac_hba_send()
738 spin_lock_irqsave(&dev->manage_lock, mflags); in aac_hba_send()
739 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { in aac_hba_send()
740 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_hba_send()
741 return -EBUSY; in aac_hba_send()
743 dev->management_fib_count++; in aac_hba_send()
744 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_hba_send()
745 spin_lock_irqsave(&fibptr->event_lock, flags); in aac_hba_send()
750 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_hba_send()
751 spin_lock_irqsave(&dev->manage_lock, mflags); in aac_hba_send()
752 dev->management_fib_count--; in aac_hba_send()
753 spin_unlock_irqrestore(&dev->manage_lock, mflags); in aac_hba_send()
755 return -EBUSY; in aac_hba_send()
761 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_hba_send()
764 return -EFAULT; in aac_hba_send()
766 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; in aac_hba_send()
767 if (wait_for_completion_interruptible(&fibptr->event_wait)) in aac_hba_send()
768 fibptr->done = 2; in aac_hba_send()
769 fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT); in aac_hba_send()
771 spin_lock_irqsave(&fibptr->event_lock, flags); in aac_hba_send()
772 if ((fibptr->done == 0) || (fibptr->done == 2)) { in aac_hba_send()
773 fibptr->done = 2; /* Tell interrupt we aborted */ in aac_hba_send()
774 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_hba_send()
775 return -ERESTARTSYS; in aac_hba_send()
777 spin_unlock_irqrestore(&fibptr->event_lock, flags); in aac_hba_send()
778 WARN_ON(fibptr->done == 0); in aac_hba_send()
780 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) in aac_hba_send()
781 return -ETIMEDOUT; in aac_hba_send()
786 return -EINPROGRESS; in aac_hba_send()
790 * aac_consumer_get - get the top of the queue
804 if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { in aac_consumer_get()
812 if (le32_to_cpu(*q->headers.consumer) >= q->entries) in aac_consumer_get()
815 index = le32_to_cpu(*q->headers.consumer); in aac_consumer_get()
816 *entry = q->base + index; in aac_consumer_get()
823 * aac_consumer_free - free consumer entry
837 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) in aac_consumer_free()
840 if (le32_to_cpu(*q->headers.consumer) >= q->entries) in aac_consumer_free()
841 *q->headers.consumer = cpu_to_le32(1); in aac_consumer_free()
843 le32_add_cpu(q->headers.consumer, 1); in aac_consumer_free()
863 * aac_fib_adapter_complete - complete adapter issued fib
873 struct hw_fib * hw_fib = fibptr->hw_fib_va; in aac_fib_adapter_complete()
874 struct aac_dev * dev = fibptr->dev; in aac_fib_adapter_complete()
879 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || in aac_fib_adapter_complete()
880 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || in aac_fib_adapter_complete()
881 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { in aac_fib_adapter_complete()
886 if (hw_fib->header.XferState == 0) { in aac_fib_adapter_complete()
887 if (dev->comm_interface == AAC_COMM_MESSAGE) in aac_fib_adapter_complete()
894 if (hw_fib->header.StructType != FIB_MAGIC && in aac_fib_adapter_complete()
895 hw_fib->header.StructType != FIB_MAGIC2 && in aac_fib_adapter_complete()
896 hw_fib->header.StructType != FIB_MAGIC2_64) { in aac_fib_adapter_complete()
897 if (dev->comm_interface == AAC_COMM_MESSAGE) in aac_fib_adapter_complete()
899 return -EINVAL; in aac_fib_adapter_complete()
908 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { in aac_fib_adapter_complete()
909 if (dev->comm_interface == AAC_COMM_MESSAGE) { in aac_fib_adapter_complete()
913 hw_fib->header.XferState |= cpu_to_le32(HostProcessed); in aac_fib_adapter_complete()
916 if (size > le16_to_cpu(hw_fib->header.SenderSize)) in aac_fib_adapter_complete()
917 return -EMSGSIZE; in aac_fib_adapter_complete()
918 hw_fib->header.Size = cpu_to_le16(size); in aac_fib_adapter_complete()
920 q = &dev->queues->queue[AdapNormRespQueue]; in aac_fib_adapter_complete()
921 spin_lock_irqsave(q->lock, qflags); in aac_fib_adapter_complete()
923 *(q->headers.producer) = cpu_to_le32(index + 1); in aac_fib_adapter_complete()
924 spin_unlock_irqrestore(q->lock, qflags); in aac_fib_adapter_complete()
937 * aac_fib_complete - fib completion handler
945 struct hw_fib * hw_fib = fibptr->hw_fib_va; in aac_fib_complete()
947 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { in aac_fib_complete()
957 if (hw_fib->header.XferState == 0 || fibptr->done == 2) in aac_fib_complete()
963 if (hw_fib->header.StructType != FIB_MAGIC && in aac_fib_complete()
964 hw_fib->header.StructType != FIB_MAGIC2 && in aac_fib_complete()
965 hw_fib->header.StructType != FIB_MAGIC2_64) in aac_fib_complete()
966 return -EINVAL; in aac_fib_complete()
974 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && in aac_fib_complete()
975 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) in aac_fib_complete()
979 else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) in aac_fib_complete()
986 } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { in aac_fib_complete()
995 * aac_printf - handle printf from firmware
1005 char *cp = dev->printfbuf; in aac_printf()
1006 if (dev->printf_enabled) in aac_printf()
1020 printk(KERN_WARNING "%s:%s", dev->name, cp); in aac_printf()
1022 printk(KERN_INFO "%s:%s", dev->name, cp); in aac_printf()
1029 return le32_to_cpu(((__le32 *)aifcmd->data)[index]); in aac_aif_data()
1038 dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", in aac_handle_aif_bu()
1041 dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); in aac_handle_aif_bu()
1045 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", in aac_handle_aif_bu()
1048 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); in aac_handle_aif_bu()
1055 * aac_handle_aif - Handle a message from the firmware
1064 struct hw_fib * hw_fib = fibptr->hw_fib_va; in aac_handle_aif()
1065 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; in aac_handle_aif()
1077 if (!dev || !dev->fsa_dev) in aac_handle_aif()
1079 container = channel = id = lun = (u32)-1; in aac_handle_aif()
1083 * re-configures that take place. As a result of this when in aac_handle_aif()
1085 * type of AIF before setting the re-config flag. in aac_handle_aif()
1087 switch (le32_to_cpu(aifcmd->command)) { in aac_handle_aif()
1089 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { in aac_handle_aif()
1091 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1093 container = (u32)-1; in aac_handle_aif()
1097 if (channel >= dev->maximum_num_channels) { in aac_handle_aif()
1098 container = (u32)-1; in aac_handle_aif()
1102 if (id >= dev->maximum_num_physicals) { in aac_handle_aif()
1103 container = (u32)-1; in aac_handle_aif()
1107 container = (u32)-1; in aac_handle_aif()
1117 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1118 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1124 * so set the flag to initiate a new re-config once we in aac_handle_aif()
1128 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) { in aac_handle_aif()
1129 device = scsi_device_lookup(dev->scsi_host_ptr, in aac_handle_aif()
1134 dev->fsa_dev[container].config_needed = CHANGE; in aac_handle_aif()
1135 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; in aac_handle_aif()
1136 dev->fsa_dev[container].config_waiting_stamp = jiffies; in aac_handle_aif()
1144 * that thing then set the re-configure flag. in aac_handle_aif()
1146 if (container != (u32)-1) { in aac_handle_aif()
1147 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1149 if ((dev->fsa_dev[container].config_waiting_on == in aac_handle_aif()
1150 le32_to_cpu(*(__le32 *)aifcmd->data)) && in aac_handle_aif()
1151 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) in aac_handle_aif()
1152 dev->fsa_dev[container].config_waiting_on = 0; in aac_handle_aif()
1154 container < dev->maximum_num_containers; ++container) { in aac_handle_aif()
1155 if ((dev->fsa_dev[container].config_waiting_on == in aac_handle_aif()
1156 le32_to_cpu(*(__le32 *)aifcmd->data)) && in aac_handle_aif()
1157 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) in aac_handle_aif()
1158 dev->fsa_dev[container].config_waiting_on = 0; in aac_handle_aif()
1163 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { in aac_handle_aif()
1165 dev->cache_protected = in aac_handle_aif()
1166 (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3)); in aac_handle_aif()
1172 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1173 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1175 dev->fsa_dev[container].config_needed = ADD; in aac_handle_aif()
1176 dev->fsa_dev[container].config_waiting_on = in aac_handle_aif()
1178 dev->fsa_dev[container].config_waiting_stamp = jiffies; in aac_handle_aif()
1185 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1186 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1188 dev->fsa_dev[container].config_needed = DELETE; in aac_handle_aif()
1189 dev->fsa_dev[container].config_waiting_on = in aac_handle_aif()
1191 dev->fsa_dev[container].config_waiting_stamp = jiffies; in aac_handle_aif()
1199 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1200 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1202 if (dev->fsa_dev[container].config_waiting_on && in aac_handle_aif()
1203 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) in aac_handle_aif()
1205 dev->fsa_dev[container].config_needed = CHANGE; in aac_handle_aif()
1206 dev->fsa_dev[container].config_waiting_on = in aac_handle_aif()
1208 dev->fsa_dev[container].config_waiting_stamp = jiffies; in aac_handle_aif()
1216 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); in aac_handle_aif()
1218 container = (u32)-1; in aac_handle_aif()
1222 if (channel >= dev->maximum_num_channels) { in aac_handle_aif()
1223 container = (u32)-1; in aac_handle_aif()
1227 if (id >= dev->maximum_num_physicals) { in aac_handle_aif()
1228 container = (u32)-1; in aac_handle_aif()
1232 container = (u32)-1; in aac_handle_aif()
1235 (((__le32 *)aifcmd->data)[0] == in aac_handle_aif()
1238 device = scsi_device_lookup(dev->scsi_host_ptr, in aac_handle_aif()
1251 * If in JBOD mode, automatic exposure of new in aac_handle_aif()
1254 if (dev->jbod) in aac_handle_aif()
1256 switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { in aac_handle_aif()
1262 ((__le32 *)aifcmd->data)[2]); in aac_handle_aif()
1264 container = (u32)-1; in aac_handle_aif()
1268 if (channel >= dev->maximum_num_channels) { in aac_handle_aif()
1269 container = (u32)-1; in aac_handle_aif()
1274 container = (u32)-1; in aac_handle_aif()
1275 if (id >= dev->maximum_num_physicals) { in aac_handle_aif()
1279 dev->maximum_num_channels)) in aac_handle_aif()
1286 ((((__le32 *)aifcmd->data)[3] in aac_handle_aif()
1288 (((__le32 *)aifcmd->data)[3] in aac_handle_aif()
1301 * that thing then set the re-configure flag. in aac_handle_aif()
1303 if (container != (u32)-1) { in aac_handle_aif()
1304 if (container >= dev->maximum_num_containers) in aac_handle_aif()
1306 if ((dev->fsa_dev[container].config_waiting_on == in aac_handle_aif()
1307 le32_to_cpu(*(__le32 *)aifcmd->data)) && in aac_handle_aif()
1308 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) in aac_handle_aif()
1309 dev->fsa_dev[container].config_waiting_on = 0; in aac_handle_aif()
1311 container < dev->maximum_num_containers; ++container) { in aac_handle_aif()
1312 if ((dev->fsa_dev[container].config_waiting_on == in aac_handle_aif()
1313 le32_to_cpu(*(__le32 *)aifcmd->data)) && in aac_handle_aif()
1314 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) in aac_handle_aif()
1315 dev->fsa_dev[container].config_waiting_on = 0; in aac_handle_aif()
1328 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && in aac_handle_aif()
1329 (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] || in aac_handle_aif()
1330 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) { in aac_handle_aif()
1332 container < dev->maximum_num_containers; in aac_handle_aif()
1338 dev->fsa_dev[container].config_waiting_on = in aac_handle_aif()
1340 dev->fsa_dev[container].config_needed = ADD; in aac_handle_aif()
1341 dev->fsa_dev[container].config_waiting_stamp = in aac_handle_aif()
1345 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && in aac_handle_aif()
1346 ((__le32 *)aifcmd->data)[6] == 0 && in aac_handle_aif()
1347 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) { in aac_handle_aif()
1349 container < dev->maximum_num_containers; in aac_handle_aif()
1355 dev->fsa_dev[container].config_waiting_on = in aac_handle_aif()
1357 dev->fsa_dev[container].config_needed = DELETE; in aac_handle_aif()
1358 dev->fsa_dev[container].config_waiting_stamp = in aac_handle_aif()
1368 for (; container < dev->maximum_num_containers; ++container) { in aac_handle_aif()
1369 if ((dev->fsa_dev[container].config_waiting_on == 0) && in aac_handle_aif()
1370 (dev->fsa_dev[container].config_needed != NOTHING) && in aac_handle_aif()
1371 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { in aac_handle_aif()
1373 dev->fsa_dev[container].config_needed; in aac_handle_aif()
1374 dev->fsa_dev[container].config_needed = NOTHING; in aac_handle_aif()
1386 * If we decided that a re-configuration needs to be done, in aac_handle_aif()
1397 if (!dev || !dev->scsi_host_ptr) in aac_handle_aif()
1404 if (dev->fsa_dev[container].valid == 1) in aac_handle_aif()
1405 dev->fsa_dev[container].valid = 2; in aac_handle_aif()
1408 device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun); in aac_handle_aif()
1418 "Device offlined - %s\n", in aac_handle_aif()
1428 "Device online - %s\n", in aac_handle_aif()
1437 && (!dev->fsa_dev[container].valid)) { in aac_handle_aif()
1445 "Device offlined - %s\n", in aac_handle_aif()
1450 scsi_rescan_device(&device->sdev_gendev); in aac_handle_aif()
1459 scsi_add_device(dev->scsi_host_ptr, channel, id, lun); in aac_handle_aif()
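Throughout aac_handle_aif() the AIF payload is treated as an array of little-endian 32-bit words, each converted with le32_to_cpu() before use. A small endian-safe decode model; the two-word layout is illustrative, not the full aac_aifcmd format:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for le32_to_cpu(): assemble a 32-bit value from
 * little-endian bytes regardless of host endianness. */
static uint32_t le32_to_cpu_model(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t data[8];
	uint32_t event = 12, container = 3;

	/* Build a little-endian payload byte by byte. */
	for (int i = 0; i < 4; i++) {
		data[i] = (uint8_t)(event >> (8 * i));
		data[4 + i] = (uint8_t)(container >> (8 * i));
	}

	printf("event=%u container=%u\n",
	       le32_to_cpu_model(&data[0]), le32_to_cpu_model(&data[4]));
	return 0;
}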
1467 static void aac_schedule_bus_scan(struct aac_dev *aac) in aac_schedule_bus_scan() argument
1469 if (aac->sa_firmware) in aac_schedule_bus_scan()
1470 aac_schedule_safw_scan_worker(aac); in aac_schedule_bus_scan()
1472 aac_schedule_src_reinit_aif_worker(aac); in aac_schedule_bus_scan()
1475 static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) in _aac_reset_adapter() argument
1479 struct Scsi_Host *host = aac->scsi_host_ptr; in _aac_reset_adapter()
1487 * - host is locked, unless called by the aacraid thread. in _aac_reset_adapter()
1490 * - in_reset is asserted, so no new i/o is getting to the in _aac_reset_adapter()
1492 * - The card is dead, or will be very shortly ;-/ so no new in _aac_reset_adapter()
1495 aac_adapter_disable_int(aac); in _aac_reset_adapter()
1496 if (aac->thread && aac->thread->pid != current->pid) { in _aac_reset_adapter()
1497 spin_unlock_irq(host->host_lock); in _aac_reset_adapter()
1498 kthread_stop(aac->thread); in _aac_reset_adapter()
1499 aac->thread = NULL; in _aac_reset_adapter()
1507 bled = forced ? 0 : aac_adapter_check_health(aac); in _aac_reset_adapter()
1508 retval = aac_adapter_restart(aac, bled, reset_type); in _aac_reset_adapter()
1517 num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; in _aac_reset_adapter()
1520 struct fib *fib = &aac->fibs[index]; in _aac_reset_adapter()
1521 __le32 XferState = fib->hw_fib_va->header.XferState; in _aac_reset_adapter()
1529 || fib->flags & FIB_CONTEXT_FLAG_WAIT) { in _aac_reset_adapter()
1531 spin_lock_irqsave(&fib->event_lock, flagv); in _aac_reset_adapter()
1532 complete(&fib->event_wait); in _aac_reset_adapter()
1533 spin_unlock_irqrestore(&fib->event_lock, flagv); in _aac_reset_adapter()
1541 index = aac->cardtype; in _aac_reset_adapter()
1544 * Re-initialize the adapter, first free resources, then carefully in _aac_reset_adapter()
1550 aac_free_irq(aac); in _aac_reset_adapter()
1551 aac_fib_map_free(aac); in _aac_reset_adapter()
1552 dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, in _aac_reset_adapter()
1553 aac->comm_phys); in _aac_reset_adapter()
1554 aac_adapter_ioremap(aac, 0); in _aac_reset_adapter()
1555 aac->comm_addr = NULL; in _aac_reset_adapter()
1556 aac->comm_phys = 0; in _aac_reset_adapter()
1557 kfree(aac->queues); in _aac_reset_adapter()
1558 aac->queues = NULL; in _aac_reset_adapter()
1559 kfree(aac->fsa_dev); in _aac_reset_adapter()
1560 aac->fsa_dev = NULL; in _aac_reset_adapter()
1563 quirks = aac_get_driver_ident(index)->quirks; in _aac_reset_adapter()
1565 retval = dma_set_mask(&aac->pdev->dev, dmamask); in _aac_reset_adapter()
1567 retval = dma_set_mask(&aac->pdev->dev, dmamask); in _aac_reset_adapter()
1569 retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); in _aac_reset_adapter()
1573 retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); in _aac_reset_adapter()
1579 if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) in _aac_reset_adapter()
1583 aac->thread = kthread_run(aac_command_thread, aac, "%s", in _aac_reset_adapter()
1584 aac->name); in _aac_reset_adapter()
1585 if (IS_ERR(aac->thread)) { in _aac_reset_adapter()
1586 retval = PTR_ERR(aac->thread); in _aac_reset_adapter()
1587 aac->thread = NULL; in _aac_reset_adapter()
1591 (void)aac_get_adapter_info(aac); in _aac_reset_adapter()
1592 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { in _aac_reset_adapter()
1593 host->sg_tablesize = 34; in _aac_reset_adapter()
1594 host->max_sectors = (host->sg_tablesize * 8) + 112; in _aac_reset_adapter()
1596 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { in _aac_reset_adapter()
1597 host->sg_tablesize = 17; in _aac_reset_adapter()
1598 host->max_sectors = (host->sg_tablesize * 8) + 112; in _aac_reset_adapter()
1600 aac_get_config_status(aac, 1); in _aac_reset_adapter()
1601 aac_get_containers(aac); in _aac_reset_adapter()
1610 aac->in_reset = 0; in _aac_reset_adapter()
1617 dev_info(&aac->pdev->dev, "Scheduling bus rescan\n"); in _aac_reset_adapter()
1618 aac_schedule_bus_scan(aac); in _aac_reset_adapter()
1622 spin_lock_irq(host->host_lock); in _aac_reset_adapter()
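The reset path renegotiates the DMA mask: it tries the quirk-driven wide mask first and falls back to 32-bit addressing if dma_set_mask() rejects it, before reattaching the adapter. A compact model of that fallback, where set_mask() is a stand-in for dma_set_mask():

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pretend the platform only supports 32-bit DMA addressing. */
static int set_mask(uint64_t mask)
{
	return mask > DMA_BIT_MASK(32) ? -1 : 0;
}

int main(void)
{
	uint64_t dmamask = DMA_BIT_MASK(64);    /* wide mask first */

	if (set_mask(dmamask) != 0) {
		dmamask = DMA_BIT_MASK(32);     /* fall back to 32-bit */
		if (set_mask(dmamask) != 0) {
			fprintf(stderr, "no usable DMA mask\n");
			return 1;
		}
	}
	printf("using %d-bit DMA mask\n",
	       dmamask == DMA_BIT_MASK(64) ? 64 : 32);
	return 0;
}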
1627 int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) in aac_reset_adapter() argument
1631 struct Scsi_Host *host = aac->scsi_host_ptr; in aac_reset_adapter()
1634 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) in aac_reset_adapter()
1635 return -EBUSY; in aac_reset_adapter()
1637 if (aac->in_reset) { in aac_reset_adapter()
1638 spin_unlock_irqrestore(&aac->fib_lock, flagv); in aac_reset_adapter()
1639 return -EBUSY; in aac_reset_adapter()
1641 aac->in_reset = 1; in aac_reset_adapter()
1642 spin_unlock_irqrestore(&aac->fib_lock, flagv); in aac_reset_adapter()
1651 /* Quiesce build, flush cache, write through mode */ in aac_reset_adapter()
1653 aac_send_shutdown(aac); in aac_reset_adapter()
1654 spin_lock_irqsave(host->host_lock, flagv); in aac_reset_adapter()
1657 retval = _aac_reset_adapter(aac, bled, reset_type); in aac_reset_adapter()
1658 spin_unlock_irqrestore(host->host_lock, flagv); in aac_reset_adapter()
1663 if ((forced < 2) && (retval == -ENODEV)) { in aac_reset_adapter()
1665 struct fib * fibctx = aac_fib_alloc(aac); in aac_reset_adapter()
1674 cmd->command = cpu_to_le32(VM_ContainerConfig); in aac_reset_adapter()
1675 cmd->type = cpu_to_le32(CT_PAUSE_IO); in aac_reset_adapter()
1676 cmd->timeout = cpu_to_le32(1); in aac_reset_adapter()
1677 cmd->min = cpu_to_le32(1); in aac_reset_adapter()
1678 cmd->noRescan = cpu_to_le32(1); in aac_reset_adapter()
1679 cmd->count = cpu_to_le32(0); in aac_reset_adapter()
1685 -2 /* Timeout silently */, 1, in aac_reset_adapter()
1692 if (status != -ERESTARTSYS) in aac_reset_adapter()
1700 int aac_check_health(struct aac_dev * aac) in aac_check_health() argument
1706 /* Extending the scope of fib_lock slightly to protect aac->in_reset */ in aac_check_health()
1707 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) in aac_check_health()
1710 if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { in aac_check_health()
1711 spin_unlock_irqrestore(&aac->fib_lock, flagv); in aac_check_health()
1715 aac->in_reset = 1; in aac_check_health()
1722 * aac.aifcmd.data[2] = AifHighPriority = 3 in aac_check_health()
1723 * aac.aifcmd.data[3] = BlinkLED in aac_check_health()
1727 entry = aac->fib_list.next; in aac_check_health()
1735 while (entry != &aac->fib_list) { in aac_check_health()
1746 if (fibctx->count > 20) { in aac_check_health()
1752 u32 time_last = fibctx->jiffies; in aac_check_health()
1758 if ((time_now - time_last) > aif_timeout) { in aac_check_health()
1759 entry = entry->next; in aac_check_health()
1760 aac_close_fib_context(aac, fibctx); in aac_check_health()
1773 fib->hw_fib_va = hw_fib; in aac_check_health()
1774 fib->dev = aac; in aac_check_health()
1776 fib->type = FSAFS_NTC_FIB_CONTEXT; in aac_check_health()
1777 fib->size = sizeof (struct fib); in aac_check_health()
1778 fib->data = hw_fib->data; in aac_check_health()
1779 aif = (struct aac_aifcmd *)hw_fib->data; in aac_check_health()
1780 aif->command = cpu_to_le32(AifCmdEventNotify); in aac_check_health()
1781 aif->seqnum = cpu_to_le32(0xFFFFFFFF); in aac_check_health()
1782 ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); in aac_check_health()
1783 ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); in aac_check_health()
1784 ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); in aac_check_health()
1785 ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); in aac_check_health()
1791 list_add_tail(&fib->fiblink, &fibctx->fib_list); in aac_check_health()
1792 fibctx->count++; in aac_check_health()
1797 complete(&fibctx->completion); in aac_check_health()
1803 entry = entry->next; in aac_check_health()
1806 spin_unlock_irqrestore(&aac->fib_lock, flagv); in aac_check_health()
1810 aac->name, BlinkLED); in aac_check_health()
1814 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); in aac_check_health()
1817 aac->in_reset = 0; in aac_check_health()
1821 static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) in is_safw_raid_volume() argument
1823 return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; in is_safw_raid_volume()
1833 return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0); in aac_lookup_safw_scsi_device()
1841 return scsi_add_device(dev->scsi_host_ptr, bus, target, 0); in aac_add_safw_device()
1862 return dev->hba_map[bus][target].scan_counter == dev->scan_counter; in aac_is_safw_scan_count_equal()
1868 return dev->fsa_dev[target].valid; in aac_is_safw_target_valid()
1931 mutex_lock(&dev->scan_mutex); in aac_scan_host()
1932 if (dev->sa_firmware) in aac_scan_host()
1935 scsi_scan_host(dev->scsi_host_ptr); in aac_scan_host()
1936 mutex_unlock(&dev->scan_mutex); in aac_scan_host()
1946 wait_event(dev->scsi_host_ptr->host_wait, in aac_src_reinit_aif_worker()
1947 !scsi_host_in_recovery(dev->scsi_host_ptr)); in aac_src_reinit_aif_worker()
1948 aac_reinit_aif(dev, dev->cardtype); in aac_src_reinit_aif_worker()
1964 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) in aac_handle_sa_aif()
1966 else if (fibptr->hbacmd_size & SA_AIF_HARDWARE) in aac_handle_sa_aif()
1968 else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE) in aac_handle_sa_aif()
1970 else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE) in aac_handle_sa_aif()
1972 else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE) in aac_handle_sa_aif()
1974 else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE) in aac_handle_sa_aif()
1996 pr_warn(" AIF not cleared by firmware - %d/%d\n", in aac_handle_sa_aif()
2012 * and pre-allocate a set of fibs outside the in get_fib_count()
2015 num = le32_to_cpu(dev->init->r7.adapter_fibs_size) in get_fib_count()
2017 spin_lock_irqsave(&dev->fib_lock, flagv); in get_fib_count()
2018 entry = dev->fib_list.next; in get_fib_count()
2019 while (entry != &dev->fib_list) { in get_fib_count()
2020 entry = entry->next; in get_fib_count()
2023 spin_unlock_irqrestore(&dev->fib_lock, flagv); in get_fib_count()
2040 --hw_fib_p; in fillup_pools()
2046 kfree(*(--hw_fib_p)); in fillup_pools()
2054 num = hw_fib_p - hw_fib_pool; in fillup_pools()
2075 spin_lock_irqsave(&dev->fib_lock, flagv); in wakeup_fibctx_threads()
2076 entry = dev->fib_list.next; in wakeup_fibctx_threads()
2086 while (entry != &dev->fib_list) { in wakeup_fibctx_threads()
2096 if (fibctx->count > 20) { in wakeup_fibctx_threads()
2102 time_last = fibctx->jiffies; in wakeup_fibctx_threads()
2108 if ((time_now - time_last) > aif_timeout) { in wakeup_fibctx_threads()
2109 entry = entry->next; in wakeup_fibctx_threads()
2120 entry = entry->next; in wakeup_fibctx_threads()
2133 newfib->hw_fib_va = hw_newfib; in wakeup_fibctx_threads()
2138 list_add_tail(&newfib->fiblink, &fibctx->fib_list); in wakeup_fibctx_threads()
2139 fibctx->count++; in wakeup_fibctx_threads()
2144 complete(&fibctx->completion); in wakeup_fibctx_threads()
2146 entry = entry->next; in wakeup_fibctx_threads()
2151 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); in wakeup_fibctx_threads()
2153 spin_unlock_irqrestore(&dev->fib_lock, flagv); in wakeup_fibctx_threads()
2164 t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
2167 while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { in aac_process_events()
2176 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; in aac_process_events()
2179 t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
2183 hw_fib = fib->hw_fib_va; in aac_process_events()
2184 if (dev->sa_firmware) { in aac_process_events()
2197 fib->type = FSAFS_NTC_FIB_CONTEXT; in aac_process_events()
2198 fib->size = sizeof(struct fib); in aac_process_events()
2199 fib->hw_fib_va = hw_fib; in aac_process_events()
2200 fib->data = hw_fib->data; in aac_process_events()
2201 fib->dev = dev; in aac_process_events()
2206 aifcmd = (struct aac_aifcmd *) hw_fib->data; in aac_process_events()
2207 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { in aac_process_events()
2210 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); in aac_process_events()
2220 if (aifcmd->command == cpu_to_le32(AifCmdEventNotify) in aac_process_events()
2221 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) { in aac_process_events()
2271 t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
2277 t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
2289 int ret = -ENOMEM; in aac_send_wellness_command()
2296 dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr, in aac_send_wellness_command()
2303 vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); in aac_send_wellness_command()
2304 vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); in aac_send_wellness_command()
2308 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); in aac_send_wellness_command()
2309 srbcmd->channel = cpu_to_le32(vbus); in aac_send_wellness_command()
2310 srbcmd->id = cpu_to_le32(vid); in aac_send_wellness_command()
2311 srbcmd->lun = 0; in aac_send_wellness_command()
2312 srbcmd->flags = cpu_to_le32(SRB_DataOut); in aac_send_wellness_command()
2313 srbcmd->timeout = cpu_to_le32(10); in aac_send_wellness_command()
2314 srbcmd->retry_limit = 0; in aac_send_wellness_command()
2315 srbcmd->cdb_size = cpu_to_le32(12); in aac_send_wellness_command()
2316 srbcmd->count = cpu_to_le32(datasize); in aac_send_wellness_command()
2318 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); in aac_send_wellness_command()
2319 srbcmd->cdb[0] = BMIC_OUT; in aac_send_wellness_command()
2320 srbcmd->cdb[6] = WRITE_HOST_WELLNESS; in aac_send_wellness_command()
2323 sg64 = (struct sgmap64 *)&srbcmd->sg; in aac_send_wellness_command()
2324 sg64->count = cpu_to_le32(1); in aac_send_wellness_command()
2325 sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16)); in aac_send_wellness_command()
2326 sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); in aac_send_wellness_command()
2327 sg64->sg[0].count = cpu_to_le32(datasize); in aac_send_wellness_command()
2332 dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr); in aac_send_wellness_command()
2345 if (ret != -ERESTARTSYS) in aac_send_wellness_command()
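The sg64 setup above splits the 64-bit DMA address of the wellness buffer into two 32-bit words, using ((addr >> 16) >> 16) for the high half so the shift stays well defined even where the address type is only 32 bits wide. A round-trip check of that split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000001234567890ULL;

	/* Same split as the sg64->sg[0].addr[] assignments above. */
	uint32_t hi = (uint32_t)((addr >> 16) >> 16);
	uint32_t lo = (uint32_t)(addr & 0xffffffff);

	printf("addr[1]=%#x addr[0]=%#x\n", hi, lo);

	/* Reassemble to verify the round trip. */
	uint64_t back = ((uint64_t)hi << 32) | lo;
	printf("round trip %s\n", back == addr ? "ok" : "BROKEN");
	return 0;
}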
2361 int ret = -ENODEV; in aac_send_safw_hostttime()
2363 if (!dev->sa_firmware) in aac_send_safw_hostttime()
2366 local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); in aac_send_safw_hostttime()
2386 int ret = -ENOMEM; in aac_send_hosttime()
2396 *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ in aac_send_hosttime()
2411 if (ret != -ERESTARTSYS) in aac_send_hosttime()
2419 * aac_command_thread - command processing thread
2439 if (dev->aif_thread) in aac_command_thread()
2440 return -EINVAL; in aac_command_thread()
2445 dev->aif_thread = 1; in aac_command_thread()
2446 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); in aac_command_thread()
2457 && ((difference = next_check_jiffies - jiffies) <= 0)) { in aac_command_thread()
2463 } else if (!dev->queues) in aac_command_thread()
2467 && ((difference = next_jiffies - jiffies) <= 0)) { in aac_command_thread()
2473 if (ret || !dev->queues) in aac_command_thread()
2481 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) in aac_command_thread()
2483 difference = HZ + HZ / 2 - in aac_command_thread()
2489 if (dev->sa_firmware) in aac_command_thread()
2499 difference = next_check_jiffies - jiffies; in aac_command_thread()
2517 if (dev->queues) in aac_command_thread()
2518 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); in aac_command_thread()
2519 dev->aif_thread = 0; in aac_command_thread()
2529 if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { in aac_acquire_irq()
2530 for (i = 0; i < dev->max_msix; i++) { in aac_acquire_irq()
2531 dev->aac_msix[i].vector_no = i; in aac_acquire_irq()
2532 dev->aac_msix[i].dev = dev; in aac_acquire_irq()
2533 if (request_irq(pci_irq_vector(dev->pdev, i), in aac_acquire_irq()
2534 dev->a_ops.adapter_intr, in aac_acquire_irq()
2535 0, "aacraid", &(dev->aac_msix[i]))) { in aac_acquire_irq()
2537 dev->name, dev->id, i); in aac_acquire_irq()
2539 free_irq(pci_irq_vector(dev->pdev, j), in aac_acquire_irq()
2540 &(dev->aac_msix[j])); in aac_acquire_irq()
2541 pci_disable_msix(dev->pdev); in aac_acquire_irq()
2542 ret = -1; in aac_acquire_irq()
2546 dev->aac_msix[0].vector_no = 0; in aac_acquire_irq()
2547 dev->aac_msix[0].dev = dev; in aac_acquire_irq()
2549 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, in aac_acquire_irq()
2551 &(dev->aac_msix[0])) < 0) { in aac_acquire_irq()
2552 if (dev->msi) in aac_acquire_irq()
2553 pci_disable_msi(dev->pdev); in aac_acquire_irq()
2555 dev->name, dev->id); in aac_acquire_irq()
2556 ret = -1; in aac_acquire_irq()
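The MSI-X branch of aac_acquire_irq() requests one IRQ per vector and, if any request_irq() fails, walks back over the vectors already acquired and frees them before disabling MSI-X. A standalone model of that acquire-with-rollback pattern; request_vec() and free_vec() are stand-ins for request_irq() and free_irq():

#include <stdio.h>

#define NVEC 4

/* Pretend vector 2 fails to illustrate the rollback. */
static int request_vec(int i)
{
	return i == 2 ? -1 : 0;
}

static void free_vec(int i)
{
	printf("freed vector %d\n", i);
}

int main(void)
{
	for (int i = 0; i < NVEC; i++) {
		if (request_vec(i) != 0) {
			printf("vector %d failed, rolling back\n", i);
			for (int j = 0; j < i; j++)   /* undo prior acquires */
				free_vec(j);
			return 1;
		}
		printf("acquired vector %d\n", i);
	}
	return 0;
}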
2567 if (dev->max_msix > 1) { in aac_free_irq()
2568 for (i = 0; i < dev->max_msix; i++) in aac_free_irq()
2569 free_irq(pci_irq_vector(dev->pdev, i), in aac_free_irq()
2570 &(dev->aac_msix[i])); in aac_free_irq()
2572 free_irq(dev->pdev->irq, &(dev->aac_msix[0])); in aac_free_irq()
2575 free_irq(dev->pdev->irq, dev); in aac_free_irq()
2577 if (dev->msi) in aac_free_irq()
2578 pci_disable_msi(dev->pdev); in aac_free_irq()
2579 else if (dev->max_msix > 1) in aac_free_irq()
2580 pci_disable_msix(dev->pdev); in aac_free_irq()