
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *

#define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
#define LPFC_RRQ_POOL_SIZE      256     /* max elements in non-DMA pool */
#define LPFC_MBX_POOL_SIZE      256     /* max elements in MBX non-DMA pool */
/* lpfc_mem_alloc_active_rrq_pool_s4() - size and create the active RRQ pool */
        size_t bytes;
        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

        if (max_xri <= 0)
                return -ENOMEM;
        bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
                sizeof(unsigned long);
        phba->cfg_rrq_xri_bitmap_sz = bytes;
        phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                            bytes);
        if (!phba->active_rrq_pool)
                return -ENOMEM;
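/*
 * Illustrative sizing example (not from the driver source): with
 * BITS_PER_LONG == 64 and max_xri == 100, the expression above rounds up to
 * ((63 + 100) / 64) * sizeof(unsigned long) = 2 * 8 = 16 bytes, i.e. a
 * 128-bit bitmap, the smallest whole number of longs that can hold one bit
 * per XRI.
 */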
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 *
 * Description: Creates and allocates PCI pools lpfc_mbuf_pool and
 * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
 * for LPFC_MBOXQ_t and lpfc_nodelist.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;

        phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
                                               LPFC_BPL_SIZE, align, 0);
        if (!phba->lpfc_mbuf_pool)
                goto fail;

        pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
                                       sizeof(struct lpfc_dmabuf),
                                       GFP_KERNEL);
        if (!pool->elements)
                goto fail_free_lpfc_mbuf_pool;

        pool->max_count = 0;
        pool->current_count = 0;
        for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
                pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
                                                        GFP_KERNEL, &pool->elements[i].phys);
                if (!pool->elements[i].virt)
                        goto fail_free_mbuf_pool;
                pool->max_count++;
                pool->current_count++;
        }

        phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
                                                          sizeof(LPFC_MBOXQ_t));
        if (!phba->mbox_mem_pool)
                goto fail_free_mbuf_pool;

        phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                         sizeof(struct lpfc_nodelist));
        if (!phba->nlp_mem_pool)
                goto fail_free_mbox_pool;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                phba->rrq_pool =
                        mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
                                                    sizeof(struct lpfc_node_rrq));
                if (!phba->rrq_pool)
                        goto fail_free_nlp_mem_pool;
                phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
                                                      &phba->pcidev->dev,
                                                      LPFC_HDR_BUF_SIZE, align, 0);
                if (!phba->lpfc_hrb_pool)
                        goto fail_free_rrq_mem_pool;
                phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
                                                      &phba->pcidev->dev,
                                                      LPFC_DATA_BUF_SIZE, align, 0);
                if (!phba->lpfc_drb_pool)
                        goto fail_free_hrb_pool;
                phba->lpfc_hbq_pool = NULL;
        } else {
                phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
                                                      &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
                if (!phba->lpfc_hbq_pool)
                        goto fail_free_nlp_mem_pool;
                phba->lpfc_hrb_pool = NULL;
                phba->lpfc_drb_pool = NULL;
        }

        if (phba->cfg_EnableXLane) {
                phba->device_data_mem_pool = mempool_create_kmalloc_pool(
                                LPFC_DEVICE_DATA_POOL_SIZE,
                                sizeof(struct lpfc_device_data));
                if (!phba->device_data_mem_pool)
                        goto fail_free_drb_pool;
        } else {
                phba->device_data_mem_pool = NULL;
        }
        return 0;

 fail_free_drb_pool:
        dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
        dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
        mempool_destroy(phba->rrq_pool);
        phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
        mempool_destroy(phba->mbox_mem_pool);
        phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
        while (i--)
                dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                              pool->elements[i].phys);
        kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
        dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;
 fail:
        return -ENOMEM;
}
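/*
 * A minimal caller sketch, not part of lpfc_mem.c: it only illustrates how
 * the create/destroy pair above is intended to be used at attach/detach
 * time. The function name example_lpfc_attach_pools() is hypothetical;
 * lpfc_mem_alloc(), lpfc_mem_free_all() and struct lpfc_hba are the real
 * driver symbols documented in this file.
 */
static int example_lpfc_attach_pools(struct lpfc_hba *phba, int align)
{
        if (lpfc_mem_alloc(phba, align))
                return -ENOMEM;         /* lpfc_mem_alloc already unwound its pools */

        /* ... bring up the rest of the HBA here ... */

        /* On detach, release everything that was created above. */
        lpfc_mem_free_all(phba);
        return 0;
}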
/* lpfc_nvmet_mem_alloc() - create the NVMET data receive buffer pool */
        phba->lpfc_nvmet_drb_pool =
                dma_pool_create("lpfc_nvmet_drb_pool",
                                &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
        if (!phba->lpfc_nvmet_drb_pool) {
                                "6024 Can't enable NVME Target - no memory\n");
                return -ENOMEM;
        }
/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
 * routine is the counterpart of lpfc_mem_alloc.
 **/
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        struct lpfc_device_data *device_data;
        int i;

        dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
        phba->lpfc_nvmet_drb_pool = NULL;

        dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;

        dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;

        dma_pool_destroy(phba->lpfc_hbq_pool);
        phba->lpfc_hbq_pool = NULL;

        mempool_destroy(phba->rrq_pool);
        phba->rrq_pool = NULL;

        /* Free NLP memory pool */
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
        if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
                mempool_destroy(phba->active_rrq_pool);
                phba->active_rrq_pool = NULL;
        }

        /* Free mbox memory pool */
        mempool_destroy(phba->mbox_mem_pool);
        phba->mbox_mem_pool = NULL;

        /* Free MBUF memory pool */
        for (i = 0; i < pool->current_count; i++)
                dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                              pool->elements[i].phys);
        kfree(pool->elements);

        dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;

        /* Free Device Data memory pool */
        if (phba->device_data_mem_pool) {
                while (!list_empty(&phba->luns)) {
                        device_data = list_first_entry(&phba->luns,
                                                       struct lpfc_device_data,
                                                       listentry);
                        list_del(&device_data->listentry);
                        mempool_free(device_data, phba->device_data_mem_pool);
                }
                mempool_destroy(phba->device_data_mem_pool);
        }
        phba->device_data_mem_pool = NULL;
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and also those
 * used for outstanding mailbox commands, then frees the remaining
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist.
 **/
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *mbox, *next_mbox;
        struct lpfc_dmabuf *mp;

        /* Free memory used in mailbox queue back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
        /* Free memory used in mailbox cmpl list back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
        /* Free the active mailbox command back to the mailbox memory pool */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        if (psli->mbox_active) {
                mbox = psli->mbox_active;
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                mempool_free(mbox, phba->mbox_mem_pool);
                psli->mbox_active = NULL;
        }

        /* Free and destroy all the allocated memory pools */
        lpfc_mem_free(phba);

        /* Free DMA buffer memory pool */
        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
        phba->lpfc_sg_dma_buf_pool = NULL;

        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
        phba->lpfc_cmd_rsp_buf_pool = NULL;

        /* Free the congestion info buffer */
        if (phba->cgn_i) {
                dma_free_coherent(&phba->pcidev->dev,
                                  sizeof(struct lpfc_cgn_info),
                                  phba->cgn_i->virt, phba->cgn_i->phys);
                kfree(phba->cgn_i);
                phba->cgn_i = NULL;
        }

        /* Free the RX table */
        kfree(phba->rxtable);
        phba->rxtable = NULL;

        /* Free the iocb lookup array */
        kfree(psli->iocbq_lookup);
        psli->iocbq_lookup = NULL;
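/*
 * Ordering note (a reading of the code above, not a quoted design rule):
 * outstanding mailbox commands on mboxq, mboxq_cmpl and mbox_active are
 * returned to mbox_mem_pool first; only then does lpfc_mem_free() destroy
 * the pools themselves, so no object is still checked out when its pool
 * goes away.
 */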
/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
 * If that fails and MEM_PRI is set in the memory flags, the allocation falls
 * back to the reserve (safety) pool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
 * phba->hbalock.
 **/
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

        ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
                pool->current_count--;
                ret = pool->elements[pool->current_count].virt;
                *handle = pool->elements[pool->current_count].phys;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 **/
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

        if (pool->current_count < pool->max_count) {
                pool->elements[pool->current_count].virt = virt;
                pool->elements[pool->current_count].phys = dma;
                pool->current_count++;
        } else {
                dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
        }
/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_mbuf_free(phba, virt, dma);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
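/*
 * A minimal usage sketch, not part of lpfc_mem.c: it pairs the allocate and
 * free helpers documented above. example_lpfc_use_mbuf() is a hypothetical
 * caller; lpfc_mbuf_alloc(), lpfc_mbuf_free() and MEM_PRI are the real
 * symbols described in the kernel-doc blocks above.
 */
static int example_lpfc_use_mbuf(struct lpfc_hba *phba)
{
        dma_addr_t phys;
        void *virt;

        /* MEM_PRI lets the allocation fall back to the safety pool */
        virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
        if (!virt)
                return -ENOMEM;

        /* ... hand virt/phys to the HBA and wait for it to finish ... */

        lpfc_mbuf_free(phba, virt, phys);
        return 0;
}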
/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool.
 **/
        ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
        return ret;
/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 **/
        dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 **/
        hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
                                          &hbqbp->dbuf.phys);
        if (!hbqbp->dbuf.virt) {
                kfree(hbqbp);
                return NULL;
        }
        hbqbp->total_size = LPFC_BPL_SIZE;
        return hbqbp;
/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned by
 * lpfc_els_hbq_alloc.
 **/
        dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
        kfree(hbqbp);
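/*
 * A paired-use sketch, not part of lpfc_mem.c: it assumes the container
 * type returned by lpfc_els_hbq_alloc() is struct hbq_dmabuf (the dbuf and
 * total_size members used above suggest this, but the declaration is not
 * shown here). example_lpfc_hbq_roundtrip() is a hypothetical caller.
 */
static int example_lpfc_hbq_roundtrip(struct lpfc_hba *phba)
{
        struct hbq_dmabuf *hbqbp = lpfc_els_hbq_alloc(phba);

        if (!hbqbp)
                return -ENOMEM;
        /* ... post hbqbp->dbuf to the HBA's ELS HBQ here ... */
        lpfc_els_hbq_free(phba, hbqbp);
        return 0;
}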
/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * PCI pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 **/
        dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
                                            &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->total_size = LPFC_DATA_BUF_SIZE;
        return dma_buf;
/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_rb_alloc.
 **/
        dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
        dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * PCI pool along with a non-DMA-mapped container for it.
 **/
        dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
                                            GFP_KERNEL, &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
        return dma_buf;
/**
 * lpfc_sli4_nvmet_free - Frees a receive buffer
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_nvmet_alloc.
 **/
        dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
        dma_pool_free(phba->lpfc_nvmet_drb_pool,
                      dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
/**
 * lpfc_in_buf_free - Free a DMA buffer
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                /* Buffer came from an HBQ; hand it back under the lock */
                spin_lock_irqsave(&phba->hbalock, flags);
                if (!phba->hbq_in_use) {
                        spin_unlock_irqrestore(&phba->hbalock, flags);
                        return;
                }
                list_del(&hbq_entry->dbuf.list);
                if (hbq_entry->tag == -1)
                        (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                (phba, hbq_entry);
                spin_unlock_irqrestore(&phba->hbalock, flags);
        } else {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
        }
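/*
 * Summary of the two paths above (a reading of the fragment, not quoted
 * driver documentation): when the SLI3 HBQ option is enabled the buffer is
 * handed back through the per-HBQ free callback under phba->hbalock;
 * otherwise it is an ordinary mbuf and is released via lpfc_mbuf_free().
 */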
/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
        rqbp = rqb_entry->hrq->rqbp;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_del(&rqb_entry->hbuf.list);
        hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
        hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
        drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
        drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
                /* Repost failed: log the RQ state and release the buffer */
                                rqb_entry->hrq->queue_id,
                                rqb_entry->hrq->host_index,
                                rqb_entry->hrq->hba_index,
                                rqb_entry->hrq->entry_count,
                                rqb_entry->drq->host_index,
                                rqb_entry->drq->hba_index);
                (rqbp->rqb_free_buffer)(phba, rqb_entry);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);