4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
6 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
46 #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
48 #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
53 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; in lpfc_mem_alloc_active_rrq_pool_s4()
56 return -ENOMEM; in lpfc_mem_alloc_active_rrq_pool_s4()
57 bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * in lpfc_mem_alloc_active_rrq_pool_s4()
59 phba->cfg_rrq_xri_bitmap_sz = bytes; in lpfc_mem_alloc_active_rrq_pool_s4()
60 phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, in lpfc_mem_alloc_active_rrq_pool_s4()
62 if (!phba->active_rrq_pool) in lpfc_mem_alloc_active_rrq_pool_s4()
63 return -ENOMEM; in lpfc_mem_alloc_active_rrq_pool_s4()
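/*
 * Illustrative sketch, not part of the lpfc source listed here: the bitmap
 * sizing above rounds max_xri bits up to a whole number of longs, which is
 * exactly what DIV_ROUND_UP() computes.  With 64-bit longs and, say,
 * max_xri = 100, this gives 2 longs, i.e. 16 bytes per active-RRQ XRI bitmap.
 * The helper name below is hypothetical.
 */
#include <linux/bitops.h>	/* BITS_PER_LONG */
#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static size_t rrq_xri_bitmap_bytes(int max_xri)
{
	/* Same result as ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * sizeof(long) */
	return DIV_ROUND_UP(max_xri, BITS_PER_LONG) * sizeof(unsigned long);
}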
69 * lpfc_mem_alloc - create and allocate all PCI and memory pools
74 * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
77 * Notes: Not interrupt-safe. Must be called with no locks held. If any
78 * allocation fails, frees all successfully allocated memory before returning.
82 * -ENOMEM on failure (if any memory allocations fail)
87 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mem_alloc()
91 phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, in lpfc_mem_alloc()
94 if (!phba->lpfc_mbuf_pool) in lpfc_mem_alloc()
97 pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, in lpfc_mem_alloc()
100 if (!pool->elements) in lpfc_mem_alloc()
103 pool->max_count = 0; in lpfc_mem_alloc()
104 pool->current_count = 0; in lpfc_mem_alloc()
106 pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool, in lpfc_mem_alloc()
107 GFP_KERNEL, &pool->elements[i].phys); in lpfc_mem_alloc()
108 if (!pool->elements[i].virt) in lpfc_mem_alloc()
110 pool->max_count++; in lpfc_mem_alloc()
111 pool->current_count++; in lpfc_mem_alloc()
114 phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, in lpfc_mem_alloc()
116 if (!phba->mbox_mem_pool) in lpfc_mem_alloc()
119 phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, in lpfc_mem_alloc()
121 if (!phba->nlp_mem_pool) in lpfc_mem_alloc()
124 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_mem_alloc()
125 phba->rrq_pool = in lpfc_mem_alloc()
128 if (!phba->rrq_pool) in lpfc_mem_alloc()
130 phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool", in lpfc_mem_alloc()
131 &phba->pcidev->dev, in lpfc_mem_alloc()
133 if (!phba->lpfc_hrb_pool) in lpfc_mem_alloc()
136 phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool", in lpfc_mem_alloc()
137 &phba->pcidev->dev, in lpfc_mem_alloc()
139 if (!phba->lpfc_drb_pool) in lpfc_mem_alloc()
141 phba->lpfc_hbq_pool = NULL; in lpfc_mem_alloc()
143 phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool", in lpfc_mem_alloc()
144 &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); in lpfc_mem_alloc()
145 if (!phba->lpfc_hbq_pool) in lpfc_mem_alloc()
147 phba->lpfc_hrb_pool = NULL; in lpfc_mem_alloc()
148 phba->lpfc_drb_pool = NULL; in lpfc_mem_alloc()
151 if (phba->cfg_EnableXLane) { in lpfc_mem_alloc()
152 phba->device_data_mem_pool = mempool_create_kmalloc_pool( in lpfc_mem_alloc()
155 if (!phba->device_data_mem_pool) in lpfc_mem_alloc()
158 phba->device_data_mem_pool = NULL; in lpfc_mem_alloc()
163 dma_pool_destroy(phba->lpfc_drb_pool); in lpfc_mem_alloc()
164 phba->lpfc_drb_pool = NULL; in lpfc_mem_alloc()
166 dma_pool_destroy(phba->lpfc_hrb_pool); in lpfc_mem_alloc()
167 phba->lpfc_hrb_pool = NULL; in lpfc_mem_alloc()
169 mempool_destroy(phba->rrq_pool); in lpfc_mem_alloc()
170 phba->rrq_pool = NULL; in lpfc_mem_alloc()
172 mempool_destroy(phba->nlp_mem_pool); in lpfc_mem_alloc()
173 phba->nlp_mem_pool = NULL; in lpfc_mem_alloc()
175 mempool_destroy(phba->mbox_mem_pool); in lpfc_mem_alloc()
176 phba->mbox_mem_pool = NULL; in lpfc_mem_alloc()
178 while (i--) in lpfc_mem_alloc()
179 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, in lpfc_mem_alloc()
180 pool->elements[i].phys); in lpfc_mem_alloc()
181 kfree(pool->elements); in lpfc_mem_alloc()
183 dma_pool_destroy(phba->lpfc_mbuf_pool); in lpfc_mem_alloc()
184 phba->lpfc_mbuf_pool = NULL; in lpfc_mem_alloc()
186 return -ENOMEM; in lpfc_mem_alloc()
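/*
 * Illustrative sketch, not part of the lpfc source listed here:
 * lpfc_mem_alloc() creates its pools in a fixed order and, when a creation
 * fails, falls through a ladder of labels that destroys the already-created
 * pools in reverse order before returning -ENOMEM.  A minimal version of the
 * same idiom, with hypothetical pool names, could look like this:
 */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/mempool.h>

static int example_pools_create(struct device *dev, struct dma_pool **dmap,
				mempool_t **memp)
{
	*dmap = dma_pool_create("example_dma_pool", dev, 1024, 8, 0);
	if (!*dmap)
		goto fail;

	*memp = mempool_create_kmalloc_pool(64, 256);
	if (!*memp)
		goto fail_free_dma_pool;

	return 0;

fail_free_dma_pool:
	dma_pool_destroy(*dmap);
	*dmap = NULL;
fail:
	return -ENOMEM;
}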
192 phba->lpfc_nvmet_drb_pool = in lpfc_nvmet_mem_alloc()
194 &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, in lpfc_nvmet_mem_alloc()
196 if (!phba->lpfc_nvmet_drb_pool) { in lpfc_nvmet_mem_alloc()
198 "6024 Can't enable NVME Target - no memory\n"); in lpfc_nvmet_mem_alloc()
199 return -ENOMEM; in lpfc_nvmet_mem_alloc()
205 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
206 * @phba: HBA to free memory for
208 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This in lpfc_mem_free()
217 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mem_free()
222 dma_pool_destroy(phba->lpfc_nvmet_drb_pool); in lpfc_mem_free()
223 phba->lpfc_nvmet_drb_pool = NULL; in lpfc_mem_free()
225 dma_pool_destroy(phba->lpfc_drb_pool); in lpfc_mem_free()
226 phba->lpfc_drb_pool = NULL; in lpfc_mem_free()
228 dma_pool_destroy(phba->lpfc_hrb_pool); in lpfc_mem_free()
229 phba->lpfc_hrb_pool = NULL; in lpfc_mem_free()
231 dma_pool_destroy(phba->lpfc_hbq_pool); in lpfc_mem_free()
232 phba->lpfc_hbq_pool = NULL; in lpfc_mem_free()
234 mempool_destroy(phba->rrq_pool); in lpfc_mem_free()
235 phba->rrq_pool = NULL; in lpfc_mem_free()
237 /* Free NLP memory pool */ in lpfc_mem_free()
238 mempool_destroy(phba->nlp_mem_pool); in lpfc_mem_free()
239 phba->nlp_mem_pool = NULL; in lpfc_mem_free()
240 if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { in lpfc_mem_free()
241 mempool_destroy(phba->active_rrq_pool); in lpfc_mem_free()
242 phba->active_rrq_pool = NULL; in lpfc_mem_free()
245 /* Free mbox memory pool */ in lpfc_mem_free()
246 mempool_destroy(phba->mbox_mem_pool); in lpfc_mem_free()
247 phba->mbox_mem_pool = NULL; in lpfc_mem_free()
249 /* Free MBUF memory pool */ in lpfc_mem_free()
250 for (i = 0; i < pool->current_count; i++) in lpfc_mem_free()
251 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, in lpfc_mem_free()
252 pool->elements[i].phys); in lpfc_mem_free()
253 kfree(pool->elements); in lpfc_mem_free()
255 dma_pool_destroy(phba->lpfc_mbuf_pool); in lpfc_mem_free()
256 phba->lpfc_mbuf_pool = NULL; in lpfc_mem_free()
258 /* Free Device Data memory pool */ in lpfc_mem_free()
259 if (phba->device_data_mem_pool) { in lpfc_mem_free()
261 while (!list_empty(&phba->luns)) { in lpfc_mem_free()
262 device_data = list_first_entry(&phba->luns, in lpfc_mem_free()
265 list_del(&device_data->listentry); in lpfc_mem_free()
266 mempool_free(device_data, phba->device_data_mem_pool); in lpfc_mem_free()
268 mempool_destroy(phba->device_data_mem_pool); in lpfc_mem_free()
270 phba->device_data_mem_pool = NULL; in lpfc_mem_free()
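/*
 * Illustrative sketch, not part of the lpfc source listed here: the
 * device-data teardown above drains a list one entry at a time and hands each
 * entry back to its mempool before the pool itself is destroyed.  The same
 * idiom, with a hypothetical item type, looks like this:
 */
#include <linux/list.h>
#include <linux/mempool.h>

struct example_item {
	struct list_head listentry;
	/* payload would follow here */
};

static void example_drain_to_mempool(struct list_head *head, mempool_t *pool)
{
	struct example_item *item;

	while (!list_empty(head)) {
		item = list_first_entry(head, struct example_item, listentry);
		list_del(&item->listentry);
		mempool_free(item, pool);
	}
	mempool_destroy(pool);
}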
275 * lpfc_mem_free_all - Frees all PCI and driver memory
276 * @phba: HBA to free memory for
278 * Description: Free memory from PCI and driver memory pools and also those
280 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
288 struct lpfc_sli *psli = &phba->sli; in lpfc_mem_free_all()
292 /* Free memory used in mailbox queue back to mailbox memory pool */ in lpfc_mem_free_all()
293 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { in lpfc_mem_free_all()
294 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); in lpfc_mem_free_all()
296 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mem_free_all()
299 list_del(&mbox->list); in lpfc_mem_free_all()
300 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_mem_free_all()
302 /* Free memory used in mailbox cmpl list back to mailbox memory pool */ in lpfc_mem_free_all()
303 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { in lpfc_mem_free_all()
304 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); in lpfc_mem_free_all()
306 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mem_free_all()
309 list_del(&mbox->list); in lpfc_mem_free_all()
310 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_mem_free_all()
312 /* Free the active mailbox command back to the mailbox memory pool */ in lpfc_mem_free_all()
313 spin_lock_irq(&phba->hbalock); in lpfc_mem_free_all()
314 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; in lpfc_mem_free_all()
315 spin_unlock_irq(&phba->hbalock); in lpfc_mem_free_all()
316 if (psli->mbox_active) { in lpfc_mem_free_all()
317 mbox = psli->mbox_active; in lpfc_mem_free_all()
318 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); in lpfc_mem_free_all()
320 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mem_free_all()
323 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_mem_free_all()
324 psli->mbox_active = NULL; in lpfc_mem_free_all()
327 /* Free and destroy all the allocated memory pools */ in lpfc_mem_free_all()
330 /* Free DMA buffer memory pool */ in lpfc_mem_free_all()
331 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_mem_free_all()
332 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_mem_free_all()
334 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); in lpfc_mem_free_all()
335 phba->lpfc_cmd_rsp_buf_pool = NULL; in lpfc_mem_free_all()
338 kfree(psli->iocbq_lookup); in lpfc_mem_free_all()
339 psli->iocbq_lookup = NULL; in lpfc_mem_free_all()
345 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
348 * @handle: used to return the DMA-mapped address of the mbuf
350 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
355 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
356 * phba->hbalock.
365 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mbuf_alloc()
369 ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); in lpfc_mbuf_alloc()
371 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbuf_alloc()
372 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { in lpfc_mbuf_alloc()
373 pool->current_count--; in lpfc_mbuf_alloc()
374 ret = pool->elements[pool->current_count].virt; in lpfc_mbuf_alloc()
375 *handle = pool->elements[pool->current_count].phys; in lpfc_mbuf_alloc()
377 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbuf_alloc()
382 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
385 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
390 * Notes: Must be called with phba->hbalock held to synchronize access to
398 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in __lpfc_mbuf_free()
400 if (pool->current_count < pool->max_count) { in __lpfc_mbuf_free()
401 pool->elements[pool->current_count].virt = virt; in __lpfc_mbuf_free()
402 pool->elements[pool->current_count].phys = dma; in __lpfc_mbuf_free()
403 pool->current_count++; in __lpfc_mbuf_free()
405 dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); in __lpfc_mbuf_free()
411 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
414 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
419 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
428 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbuf_free()
430 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbuf_free()
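/*
 * Illustrative sketch, not part of the lpfc source listed here: a caller-side
 * view of the mbuf API above.  Passing MEM_PRI lets the allocation fall back
 * to the pre-allocated safety pool when dma_pool_alloc() returns NULL.
 * Assumes the usual lpfc headers (which declare struct lpfc_hba, MEM_PRI and
 * the mbuf prototypes) are in scope; the function name is hypothetical.
 */
static int example_use_mbuf(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
	if (!virt)
		return -ENOMEM;

	/* ... fill virt and hand phys to the hardware ... */

	lpfc_mbuf_free(phba, virt, phys);
	return 0;
}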
435 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
439 * @handle: used to return the DMA-mapped address of the nvmet_buf
441 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
453 ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); in lpfc_nvmet_buf_alloc()
458 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
462 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
469 dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); in lpfc_nvmet_buf_free()
473 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
476 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
477 * pool along with a non-DMA-mapped container for it.
479 * Notes: Not interrupt-safe. Must be called with no locks held.
494 hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, in lpfc_els_hbq_alloc()
495 &hbqbp->dbuf.phys); in lpfc_els_hbq_alloc()
496 if (!hbqbp->dbuf.virt) { in lpfc_els_hbq_alloc()
500 hbqbp->total_size = LPFC_BPL_SIZE; in lpfc_els_hbq_alloc()
505 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
509 * Description: Frees both the container and the DMA-mapped buffer returned by
519 dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); in lpfc_els_hbq_free()
525 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
528 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool and
529 * lpfc_drb_pool PCI pools, along with a non-DMA-mapped container for it.
531 * Notes: Not interrupt-safe. Must be called with no locks held.
546 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, in lpfc_sli4_rb_alloc()
547 &dma_buf->hbuf.phys); in lpfc_sli4_rb_alloc()
548 if (!dma_buf->hbuf.virt) { in lpfc_sli4_rb_alloc()
552 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, in lpfc_sli4_rb_alloc()
553 &dma_buf->dbuf.phys); in lpfc_sli4_rb_alloc()
554 if (!dma_buf->dbuf.virt) { in lpfc_sli4_rb_alloc()
555 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, in lpfc_sli4_rb_alloc()
556 dma_buf->hbuf.phys); in lpfc_sli4_rb_alloc()
560 dma_buf->total_size = LPFC_DATA_BUF_SIZE; in lpfc_sli4_rb_alloc()
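/*
 * Illustrative sketch, not part of the lpfc source listed here:
 * lpfc_sli4_rb_alloc() above takes a header buffer and a data buffer from two
 * separate DMA pools and rolls back the first allocation if the second one
 * fails, so nothing leaks.  The same pattern with hypothetical names:
 */
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct example_rb {
	void *hdr_virt, *data_virt;
	dma_addr_t hdr_phys, data_phys;
};

static int example_rb_fill(struct example_rb *rb, struct dma_pool *hdr_pool,
			   struct dma_pool *data_pool)
{
	rb->hdr_virt = dma_pool_alloc(hdr_pool, GFP_KERNEL, &rb->hdr_phys);
	if (!rb->hdr_virt)
		return -ENOMEM;

	rb->data_virt = dma_pool_alloc(data_pool, GFP_KERNEL, &rb->data_phys);
	if (!rb->data_virt) {
		dma_pool_free(hdr_pool, rb->hdr_virt, rb->hdr_phys);
		return -ENOMEM;
	}
	return 0;
}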
565 * lpfc_sli4_rb_free - Frees a receive buffer
569 * Description: Frees both the container and the DMA-mapped buffers returned by
579 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); in lpfc_sli4_rb_free()
580 dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); in lpfc_sli4_rb_free()
585 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
588 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool and
589 * lpfc_nvmet_drb_pool PCI pools, along with a non-DMA-mapped container for it.
591 * Notes: Not interrupt-safe. Must be called with no locks held.
606 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, in lpfc_sli4_nvmet_alloc()
607 &dma_buf->hbuf.phys); in lpfc_sli4_nvmet_alloc()
608 if (!dma_buf->hbuf.virt) { in lpfc_sli4_nvmet_alloc()
612 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, in lpfc_sli4_nvmet_alloc()
613 GFP_KERNEL, &dma_buf->dbuf.phys); in lpfc_sli4_nvmet_alloc()
614 if (!dma_buf->dbuf.virt) { in lpfc_sli4_nvmet_alloc()
615 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, in lpfc_sli4_nvmet_alloc()
616 dma_buf->hbuf.phys); in lpfc_sli4_nvmet_alloc()
620 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; in lpfc_sli4_nvmet_alloc()
625 * lpfc_sli4_nvmet_free - Frees a receive buffer
629 * Description: Frees both the container and the DMA-mapped buffers returned by
639 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); in lpfc_sli4_nvmet_free()
640 dma_pool_free(phba->lpfc_nvmet_drb_pool, in lpfc_sli4_nvmet_free()
641 dmab->dbuf.virt, dmab->dbuf.phys); in lpfc_sli4_nvmet_free()
646 * lpfc_in_buf_free - Free a DMA buffer
653 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
666 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { in lpfc_in_buf_free()
669 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_in_buf_free()
670 if (!phba->hbq_in_use) { in lpfc_in_buf_free()
671 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_in_buf_free()
674 list_del(&hbq_entry->dbuf.list); in lpfc_in_buf_free()
675 if (hbq_entry->tag == -1) { in lpfc_in_buf_free()
676 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) in lpfc_in_buf_free()
681 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_in_buf_free()
683 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_in_buf_free()
690 * lpfc_rq_buf_free - Free a RQ DMA buffer
697 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
715 rqbp = rqb_entry->hrq->rqbp; in lpfc_rq_buf_free()
717 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_rq_buf_free()
718 list_del(&rqb_entry->hbuf.list); in lpfc_rq_buf_free()
719 hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); in lpfc_rq_buf_free()
720 hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); in lpfc_rq_buf_free()
721 drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); in lpfc_rq_buf_free()
722 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); in lpfc_rq_buf_free()
723 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); in lpfc_rq_buf_free()
725 (rqbp->rqb_free_buffer)(phba, rqb_entry); in lpfc_rq_buf_free()
729 rqb_entry->hrq->queue_id, in lpfc_rq_buf_free()
730 rqb_entry->hrq->host_index, in lpfc_rq_buf_free()
731 rqb_entry->hrq->hba_index, in lpfc_rq_buf_free()
732 rqb_entry->hrq->entry_count, in lpfc_rq_buf_free()
733 rqb_entry->drq->host_index, in lpfc_rq_buf_free()
734 rqb_entry->drq->hba_index); in lpfc_rq_buf_free()
736 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); in lpfc_rq_buf_free()
737 rqbp->buffer_count++; in lpfc_rq_buf_free()
740 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_rq_buf_free()
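/*
 * Illustrative sketch, not part of the lpfc source listed here:
 * putPaddrLow()/putPaddrHigh() are lpfc macros that split a 64-bit DMA
 * address into the 32-bit low and high words a receive-queue entry carries.
 * Locally defined equivalents, with hypothetical names, would be:
 */
#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */
#include <linux/types.h>

static inline u32 example_paddr_lo(dma_addr_t addr)
{
	return lower_32_bits(addr);
}

static inline u32 example_paddr_hi(dma_addr_t addr)
{
	return upper_32_bits(addr);
}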