Identifier search results for "ring" in the b43legacy wireless driver's DMA code (dma.c), listed by source line number.
32 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring, in op32_idx2desc() argument
38 *meta = &(ring->meta[slot]); in op32_idx2desc()
39 desc = ring->descbase; in op32_idx2desc()
45 static void op32_fill_descriptor(struct b43legacy_dmaring *ring, in op32_fill_descriptor() argument
50 struct b43legacy_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
62 addr |= ring->dev->dma.translation; in op32_fill_descriptor()
63 ctl = (bufsize - ring->frameoffset) in op32_fill_descriptor()
65 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
80 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) in op32_poke_tx() argument
82 b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX, in op32_poke_tx()
86 static void op32_tx_suspend(struct b43legacy_dmaring *ring) in op32_tx_suspend() argument
88 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, in op32_tx_suspend()
89 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) in op32_tx_suspend()
93 static void op32_tx_resume(struct b43legacy_dmaring *ring) in op32_tx_resume() argument
95 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, in op32_tx_resume()
96 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) in op32_tx_resume()
100 static int op32_get_current_rxslot(struct b43legacy_dmaring *ring) in op32_get_current_rxslot() argument
104 val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS); in op32_get_current_rxslot()
110 static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, in op32_set_current_rxslot() argument
113 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, in op32_set_current_rxslot()
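The op32_* helpers above treat the ring as one flat array of two-word hardware descriptors with a parallel meta[] array indexed by the same slot number; op32_idx2desc() returns both for a slot. A minimal userspace sketch of that layout, assuming field names after the driver's b43legacy_dmadesc32 but with the meta contents simplified and the control flag bits omitted:

#include <stdint.h>

/* Two 32-bit words per hardware descriptor, as in b43legacy_dmadesc32. */
struct dmadesc32 {
	uint32_t control;	/* byte count plus start/end/IRQ flag bits */
	uint32_t address;	/* bus address, OR'd with the core's DMA translation */
};

struct dma_meta {
	void *skb;		/* driver-side bookkeeping for this slot's buffer */
	uint64_t dmaaddr;
};

struct dmaring {
	struct dmadesc32 *descbase;	/* coherent ring memory (alloc_ringmemory) */
	struct dma_meta *meta;		/* one entry per slot, parallel to descbase */
	int nr_slots;
};

/* One slot index selects both the hardware descriptor and its meta entry,
 * which is the contract of op32_idx2desc() above. */
static struct dmadesc32 *idx2desc(struct dmaring *ring, int slot,
				  struct dma_meta **meta)
{
	*meta = &ring->meta[slot];
	return &ring->descbase[slot];
}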
117 static inline int free_slots(struct b43legacy_dmaring *ring) in free_slots() argument
119 return (ring->nr_slots - ring->used_slots); in free_slots()
122 static inline int next_slot(struct b43legacy_dmaring *ring, int slot) in next_slot() argument
124 B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); in next_slot()
125 if (slot == ring->nr_slots - 1) in next_slot()
130 static inline int prev_slot(struct b43legacy_dmaring *ring, int slot) in prev_slot() argument
132 B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); in prev_slot()
134 return ring->nr_slots - 1; in prev_slot()
139 static void update_max_used_slots(struct b43legacy_dmaring *ring, in update_max_used_slots() argument
142 if (current_used_slots <= ring->max_used_slots) in update_max_used_slots()
144 ring->max_used_slots = current_used_slots; in update_max_used_slots()
145 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) in update_max_used_slots()
146 b43legacydbg(ring->dev->wl, in update_max_used_slots()
147 "max_used_slots increased to %d on %s ring %d\n", in update_max_used_slots()
148 ring->max_used_slots, in update_max_used_slots()
149 ring->tx ? "TX" : "RX", in update_max_used_slots()
150 ring->index); in update_max_used_slots()
154 void update_max_used_slots(struct b43legacy_dmaring *ring, in update_max_used_slots() argument
161 int request_slot(struct b43legacy_dmaring *ring) in request_slot() argument
165 B43legacy_WARN_ON(!ring->tx); in request_slot()
166 B43legacy_WARN_ON(ring->stopped); in request_slot()
167 B43legacy_WARN_ON(free_slots(ring) == 0); in request_slot()
169 slot = next_slot(ring, ring->current_slot); in request_slot()
170 ring->current_slot = slot; in request_slot()
171 ring->used_slots++; in request_slot()
173 update_max_used_slots(ring, ring->used_slots); in request_slot()
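free_slots(), next_slot(), prev_slot() and request_slot() are plain modular arithmetic over nr_slots, with current_slot starting at -1 on a fresh TX ring (see b43legacy_setup_dmaring further down) and the caller responsible for checking free_slots() before requesting. A self-contained model of that bookkeeping:

/* Modular slot bookkeeping, modeled on the helpers above. */
struct ring_state {
	int nr_slots;
	int used_slots;
	int current_slot;	/* -1 on a fresh TX ring */
};

static int free_slots(const struct ring_state *r)
{
	return r->nr_slots - r->used_slots;
}

static int next_slot(const struct ring_state *r, int slot)
{
	return (slot == r->nr_slots - 1) ? 0 : slot + 1;	/* wrap at the end */
}

static int prev_slot(const struct ring_state *r, int slot)
{
	return (slot == 0) ? r->nr_slots - 1 : slot - 1;	/* wrap at the start */
}

/* Claim the next slot; the caller must have verified free_slots() first,
 * which is what the WARN_ONs in request_slot() above assert. */
static int request_slot(struct ring_state *r)
{
	int slot = next_slot(r, r->current_slot);

	r->current_slot = slot;
	r->used_slots++;
	return slot;
}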
178 /* Mac80211-queue to b43legacy-ring mapping */
183 struct b43legacy_dmaring *ring; in priority_to_txring() local
185 /*FIXME: For now we always run on TX-ring-1 */ in priority_to_txring()
194 ring = dev->dma.tx_ring3; in priority_to_txring()
197 ring = dev->dma.tx_ring2; in priority_to_txring()
200 ring = dev->dma.tx_ring1; in priority_to_txring()
203 ring = dev->dma.tx_ring0; in priority_to_txring()
206 ring = dev->dma.tx_ring4; in priority_to_txring()
209 ring = dev->dma.tx_ring5; in priority_to_txring()
213 return ring; in priority_to_txring()
216 /* Bcm4301-ring to mac80211-queue mapping */
217 static inline int txring_to_priority(struct b43legacy_dmaring *ring) in txring_to_priority() argument
225 return idx_to_prio[ring->index]; in txring_to_priority()
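priority_to_txring() and txring_to_priority() are inverse mappings between mac80211 queue priorities and the six TX rings, one via a switch and one via the idx_to_prio[] lookup table. Assuming the switch pairs priorities 0 through 5 with the rings in the order shown above (ring3, ring2, ring1, ring0, ring4, ring5; the case labels themselves did not match the search), the inverse table works out as follows:

/* Ring index -> mac80211 queue priority; the inverse of the
 * priority -> ring switch, under the case ordering assumed above. */
static const unsigned char idx_to_prio[] = { 3, 2, 1, 0, 4, 5 };

static int ring_index_to_priority(int ring_index)
{
	return idx_to_prio[ring_index];
}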
247 dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, in map_descbuffer() argument
255 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, in map_descbuffer()
259 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, in map_descbuffer()
267 void unmap_descbuffer(struct b43legacy_dmaring *ring, in unmap_descbuffer() argument
273 dma_unmap_single(ring->dev->dev->dma_dev, in unmap_descbuffer()
277 dma_unmap_single(ring->dev->dev->dma_dev, in unmap_descbuffer()
283 void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, in sync_descbuffer_for_cpu() argument
287 B43legacy_WARN_ON(ring->tx); in sync_descbuffer_for_cpu()
289 dma_sync_single_for_cpu(ring->dev->dev->dma_dev, in sync_descbuffer_for_cpu()
294 void sync_descbuffer_for_device(struct b43legacy_dmaring *ring, in sync_descbuffer_for_device() argument
298 B43legacy_WARN_ON(ring->tx); in sync_descbuffer_for_device()
300 dma_sync_single_for_device(ring->dev->dev->dma_dev, in sync_descbuffer_for_device()
305 void free_descriptor_buffer(struct b43legacy_dmaring *ring, in free_descriptor_buffer() argument
318 static int alloc_ringmemory(struct b43legacy_dmaring *ring) in alloc_ringmemory() argument
321 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, in alloc_ringmemory()
323 &(ring->dmabase), GFP_KERNEL); in alloc_ringmemory()
324 if (!ring->descbase) in alloc_ringmemory()
330 static void free_ringmemory(struct b43legacy_dmaring *ring) in free_ringmemory() argument
332 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE, in free_ringmemory()
333 ring->descbase, ring->dmabase); in free_ringmemory()
411 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, in b43legacy_dma_mapping_error() argument
416 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) in b43legacy_dma_mapping_error()
419 switch (ring->type) { in b43legacy_dma_mapping_error()
435 unmap_descbuffer(ring, addr, buffersize, dma_to_device); in b43legacy_dma_mapping_error()
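map_descbuffer()/unmap_descbuffer() wrap the streaming DMA API, and b43legacy_dma_mapping_error() vets every fresh mapping before the address reaches hardware: a failed mapping is rejected outright, while a mapping that succeeded but landed outside the engine's addressable window (the ring->type switch above distinguishes the 30-bit and 32-bit engine types) is unmapped again first. A kernel-style sketch of that check, with addr_limit standing in for the per-type window:

#include <linux/dma-mapping.h>

static bool dma_addr_unusable(struct device *dev, dma_addr_t addr,
			      size_t len, bool to_device, u64 addr_limit)
{
	if (dma_mapping_error(dev, addr))
		return true;		/* nothing was mapped, nothing to undo */

	if ((u64)addr + len > addr_limit) {	/* beyond the engine's window */
		dma_unmap_single(dev, addr, len,
				 to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		return true;
	}
	return false;
}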
440 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, in setup_rx_descbuffer() argument
450 B43legacy_WARN_ON(ring->tx); in setup_rx_descbuffer()
452 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); in setup_rx_descbuffer()
455 dmaaddr = map_descbuffer(ring, skb->data, in setup_rx_descbuffer()
456 ring->rx_buffersize, 0); in setup_rx_descbuffer()
457 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { in setup_rx_descbuffer()
463 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); in setup_rx_descbuffer()
466 dmaaddr = map_descbuffer(ring, skb->data, in setup_rx_descbuffer()
467 ring->rx_buffersize, 0); in setup_rx_descbuffer()
470 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { in setup_rx_descbuffer()
477 op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); in setup_rx_descbuffer()
488 * This is used for an RX ring only.
490 static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring) in alloc_initial_descbuffers() argument
497 for (i = 0; i < ring->nr_slots; i++) { in alloc_initial_descbuffers()
498 desc = op32_idx2desc(ring, i, &meta); in alloc_initial_descbuffers()
500 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); in alloc_initial_descbuffers()
502 b43legacyerr(ring->dev->wl, in alloc_initial_descbuffers()
508 ring->used_slots = ring->nr_slots; in alloc_initial_descbuffers()
515 desc = op32_idx2desc(ring, i, &meta); in alloc_initial_descbuffers()
517 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); in alloc_initial_descbuffers()
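alloc_initial_descbuffers() arms every RX slot up front and, on a mid-loop failure, walks back over only the slots it already populated (the unmap_descbuffer() call above is that unwind path); note that an RX ring permanently keeps used_slots equal to nr_slots. The shape of that partial unwind, with setup_one_rx_buffer()/free_one_rx_buffer() as hypothetical stand-ins for setup_rx_descbuffer() and the unmap-and-free pair:

static int alloc_all_rx_buffers(struct ring_state *r)
{
	int i, err;

	for (i = 0; i < r->nr_slots; i++) {
		err = setup_one_rx_buffer(r, i);	/* hypothetical */
		if (err)
			goto err_unwind;
	}
	r->used_slots = r->nr_slots;	/* an RX ring keeps every slot armed */
	return 0;

err_unwind:
	for (i--; i >= 0; i--)		/* free only what was allocated */
		free_one_rx_buffer(r, i);	/* hypothetical */
	return err;
}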
524 * Reset the controller, write the ring busaddress
527 static int dmacontroller_setup(struct b43legacy_dmaring *ring) in dmacontroller_setup() argument
532 u32 trans = ring->dev->dma.translation; in dmacontroller_setup()
533 u32 ringbase = (u32)(ring->dmabase); in dmacontroller_setup()
535 if (ring->tx) { in dmacontroller_setup()
541 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value); in dmacontroller_setup()
542 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, in dmacontroller_setup()
546 err = alloc_initial_descbuffers(ring); in dmacontroller_setup()
552 value = (ring->frameoffset << in dmacontroller_setup()
557 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value); in dmacontroller_setup()
558 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, in dmacontroller_setup()
561 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200); in dmacontroller_setup()
569 static void dmacontroller_cleanup(struct b43legacy_dmaring *ring) in dmacontroller_cleanup() argument
571 if (ring->tx) { in dmacontroller_cleanup()
572 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, in dmacontroller_cleanup()
573 ring->type); in dmacontroller_cleanup()
574 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); in dmacontroller_cleanup()
576 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, in dmacontroller_cleanup()
577 ring->type); in dmacontroller_cleanup()
578 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0); in dmacontroller_cleanup()
582 static void free_all_descbuffers(struct b43legacy_dmaring *ring) in free_all_descbuffers() argument
587 if (!ring->used_slots) in free_all_descbuffers()
589 for (i = 0; i < ring->nr_slots; i++) { in free_all_descbuffers()
590 op32_idx2desc(ring, i, &meta); in free_all_descbuffers()
593 B43legacy_WARN_ON(!ring->tx); in free_all_descbuffers()
596 if (ring->tx) in free_all_descbuffers()
597 unmap_descbuffer(ring, meta->dmaaddr, in free_all_descbuffers()
600 unmap_descbuffer(ring, meta->dmaaddr, in free_all_descbuffers()
601 ring->rx_buffersize, 0); in free_all_descbuffers()
602 free_descriptor_buffer(ring, meta, 0); in free_all_descbuffers()
629 struct b43legacy_dmaring *ring; in b43legacy_setup_dmaring() local
634 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in b43legacy_setup_dmaring()
635 if (!ring) in b43legacy_setup_dmaring()
637 ring->type = type; in b43legacy_setup_dmaring()
638 ring->dev = dev; in b43legacy_setup_dmaring()
644 ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), in b43legacy_setup_dmaring()
646 if (!ring->meta) in b43legacy_setup_dmaring()
649 ring->txhdr_cache = kcalloc(nr_slots, in b43legacy_setup_dmaring()
652 if (!ring->txhdr_cache) in b43legacy_setup_dmaring()
656 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, in b43legacy_setup_dmaring()
660 if (b43legacy_dma_mapping_error(ring, dma_test, in b43legacy_setup_dmaring()
663 kfree(ring->txhdr_cache); in b43legacy_setup_dmaring()
664 ring->txhdr_cache = kcalloc(nr_slots, in b43legacy_setup_dmaring()
667 if (!ring->txhdr_cache) in b43legacy_setup_dmaring()
671 ring->txhdr_cache, in b43legacy_setup_dmaring()
675 if (b43legacy_dma_mapping_error(ring, dma_test, in b43legacy_setup_dmaring()
685 ring->nr_slots = nr_slots; in b43legacy_setup_dmaring()
686 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); in b43legacy_setup_dmaring()
687 ring->index = controller_index; in b43legacy_setup_dmaring()
689 ring->tx = true; in b43legacy_setup_dmaring()
690 ring->current_slot = -1; in b43legacy_setup_dmaring()
692 if (ring->index == 0) { in b43legacy_setup_dmaring()
693 ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE; in b43legacy_setup_dmaring()
694 ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; in b43legacy_setup_dmaring()
695 } else if (ring->index == 3) { in b43legacy_setup_dmaring()
696 ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; in b43legacy_setup_dmaring()
697 ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; in b43legacy_setup_dmaring()
702 ring->last_injected_overflow = jiffies; in b43legacy_setup_dmaring()
705 err = alloc_ringmemory(ring); in b43legacy_setup_dmaring()
708 err = dmacontroller_setup(ring); in b43legacy_setup_dmaring()
713 return ring; in b43legacy_setup_dmaring()
716 free_ringmemory(ring); in b43legacy_setup_dmaring()
718 kfree(ring->txhdr_cache); in b43legacy_setup_dmaring()
720 kfree(ring->meta); in b43legacy_setup_dmaring()
722 kfree(ring); in b43legacy_setup_dmaring()
723 ring = NULL; in b43legacy_setup_dmaring()
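b43legacy_setup_dmaring() acquires its resources in order (ring struct, meta array, txhdr cache, coherent ring memory, controller setup), and the failure lines above (free_ringmemory, the kfrees, ring = NULL) are the tail of a reverse-order goto unwind. A kernel-style sketch of that pattern, with the txhdr cache step omitted for brevity; the helper names are the driver's, the reduced structure is illustrative:

#include <linux/slab.h>

static struct b43legacy_dmaring *setup_ring_sketch(int nr_slots)
{
	struct b43legacy_dmaring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->meta = kcalloc(nr_slots, sizeof(*ring->meta), GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (alloc_ringmemory(ring))
		goto err_kfree_meta;
	if (dmacontroller_setup(ring))
		goto err_free_ringmemory;
out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);		/* undo step 3 */
err_kfree_meta:
	kfree(ring->meta);		/* undo step 2 */
err_kfree_ring:
	kfree(ring);			/* undo step 1 */
	ring = NULL;
	goto out;
}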
728 static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring) in b43legacy_destroy_dmaring() argument
730 if (!ring) in b43legacy_destroy_dmaring()
733 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:" in b43legacy_destroy_dmaring()
734 " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base, in b43legacy_destroy_dmaring()
735 (ring->tx) ? "TX" : "RX", ring->max_used_slots, in b43legacy_destroy_dmaring()
736 ring->nr_slots); in b43legacy_destroy_dmaring()
740 dmacontroller_cleanup(ring); in b43legacy_destroy_dmaring()
741 free_all_descbuffers(ring); in b43legacy_destroy_dmaring()
742 free_ringmemory(ring); in b43legacy_destroy_dmaring()
744 kfree(ring->txhdr_cache); in b43legacy_destroy_dmaring()
745 kfree(ring->meta); in b43legacy_destroy_dmaring()
746 kfree(ring); in b43legacy_destroy_dmaring()
779 struct b43legacy_dmaring *ring; in b43legacy_dma_init() local
800 ring = b43legacy_setup_dmaring(dev, 0, 1, type); in b43legacy_dma_init()
801 if (!ring) in b43legacy_dma_init()
803 dma->tx_ring0 = ring; in b43legacy_dma_init()
805 ring = b43legacy_setup_dmaring(dev, 1, 1, type); in b43legacy_dma_init()
806 if (!ring) in b43legacy_dma_init()
808 dma->tx_ring1 = ring; in b43legacy_dma_init()
810 ring = b43legacy_setup_dmaring(dev, 2, 1, type); in b43legacy_dma_init()
811 if (!ring) in b43legacy_dma_init()
813 dma->tx_ring2 = ring; in b43legacy_dma_init()
815 ring = b43legacy_setup_dmaring(dev, 3, 1, type); in b43legacy_dma_init()
816 if (!ring) in b43legacy_dma_init()
818 dma->tx_ring3 = ring; in b43legacy_dma_init()
820 ring = b43legacy_setup_dmaring(dev, 4, 1, type); in b43legacy_dma_init()
821 if (!ring) in b43legacy_dma_init()
823 dma->tx_ring4 = ring; in b43legacy_dma_init()
825 ring = b43legacy_setup_dmaring(dev, 5, 1, type); in b43legacy_dma_init()
826 if (!ring) in b43legacy_dma_init()
828 dma->tx_ring5 = ring; in b43legacy_dma_init()
831 ring = b43legacy_setup_dmaring(dev, 0, 0, type); in b43legacy_dma_init()
832 if (!ring) in b43legacy_dma_init()
834 dma->rx_ring0 = ring; in b43legacy_dma_init()
837 ring = b43legacy_setup_dmaring(dev, 3, 0, type); in b43legacy_dma_init()
838 if (!ring) in b43legacy_dma_init()
840 dma->rx_ring3 = ring; in b43legacy_dma_init()
873 static u16 generate_cookie(struct b43legacy_dmaring *ring, in generate_cookie() argument
884 switch (ring->index) { in generate_cookie()
916 struct b43legacy_dmaring *ring = NULL; in parse_cookie() local
920 ring = dma->tx_ring0; in parse_cookie()
923 ring = dma->tx_ring1; in parse_cookie()
926 ring = dma->tx_ring2; in parse_cookie()
929 ring = dma->tx_ring3; in parse_cookie()
932 ring = dma->tx_ring4; in parse_cookie()
935 ring = dma->tx_ring5; in parse_cookie()
941 B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); in parse_cookie()
943 return ring; in parse_cookie()
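generate_cookie() packs the ring identity and slot number into the 16-bit cookie that hardware later echoes back in TX status, and parse_cookie() reverses it: the ring is tagged in the top four bits (the per-ring switch above) and the slot lives in the low twelve. A runnable model; the 0xA through 0xF tag values are one consistent choice, not necessarily the driver's exact constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ring identity in the top nibble, slot in the low 12 bits. */
static uint16_t generate_cookie(int ring_index, int slot)
{
	assert((slot & 0xF000) == 0);	/* slot must fit in 12 bits */
	return (uint16_t)(((ring_index + 0xA) << 12) | slot);
}

static void parse_cookie(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot;

	parse_cookie(generate_cookie(3, 57), &ring, &slot);
	printf("ring %d slot %d\n", ring, slot);	/* ring 3 slot 57 */
	return 0;
}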
946 static int dma_tx_fragment(struct b43legacy_dmaring *ring, in dma_tx_fragment() argument
962 old_top_slot = ring->current_slot; in dma_tx_fragment()
963 old_used_slots = ring->used_slots; in dma_tx_fragment()
966 slot = request_slot(ring); in dma_tx_fragment()
967 desc = op32_idx2desc(ring, slot, &meta_hdr); in dma_tx_fragment()
970 header = &(ring->txhdr_cache[slot * sizeof( in dma_tx_fragment()
972 err = b43legacy_generate_txhdr(ring->dev, header, in dma_tx_fragment()
974 generate_cookie(ring, slot)); in dma_tx_fragment()
976 ring->current_slot = old_top_slot; in dma_tx_fragment()
977 ring->used_slots = old_used_slots; in dma_tx_fragment()
981 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, in dma_tx_fragment()
983 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, in dma_tx_fragment()
985 ring->current_slot = old_top_slot; in dma_tx_fragment()
986 ring->used_slots = old_used_slots; in dma_tx_fragment()
989 op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr, in dma_tx_fragment()
993 slot = request_slot(ring); in dma_tx_fragment()
994 desc = op32_idx2desc(ring, slot, &meta); in dma_tx_fragment()
1000 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); in dma_tx_fragment()
1002 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { in dma_tx_fragment()
1005 ring->current_slot = old_top_slot; in dma_tx_fragment()
1006 ring->used_slots = old_used_slots; in dma_tx_fragment()
1021 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); in dma_tx_fragment()
1022 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { in dma_tx_fragment()
1023 ring->current_slot = old_top_slot; in dma_tx_fragment()
1024 ring->used_slots = old_used_slots; in dma_tx_fragment()
1030 op32_fill_descriptor(ring, desc, meta->dmaaddr, in dma_tx_fragment()
1035 op32_poke_tx(ring, next_slot(ring, slot)); in dma_tx_fragment()
1041 unmap_descbuffer(ring, meta_hdr->dmaaddr, in dma_tx_fragment()
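dma_tx_fragment() spends two slots per packet, one descriptor for the cached TX header and one for the skb body, and snapshots current_slot/used_slots first so that a header-generation or mapping failure can restore the ring to its pre-call state (the repeated old_top_slot/old_used_slots stores above). A reduced model reusing the ring_state sketch from earlier; the map_ok flags stand in for the DMA-mapping steps where failure actually occurs:

#include <errno.h>
#include <stdbool.h>

static int tx_fragment(struct ring_state *r, bool hdr_map_ok,
		       bool body_map_ok)
{
	int old_top = r->current_slot;	/* snapshot for rollback */
	int old_used = r->used_slots;
	int hdr_slot, body_slot;

	hdr_slot = request_slot(r);	/* slot 1: device TX header */
	if (!hdr_map_ok)
		goto rollback;

	body_slot = request_slot(r);	/* slot 2: the packet body */
	if (!body_map_ok)
		goto rollback;		/* the real code also unmaps the header */

	/* Fill both descriptors, then hand the ring to hardware:
	 * op32_poke_tx(ring, next_slot(ring, body_slot)); */
	(void)hdr_slot;
	(void)body_slot;
	return 0;

rollback:				/* the ring looks untouched again */
	r->current_slot = old_top;
	r->used_slots = old_used;
	return -EIO;
}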
1047 int should_inject_overflow(struct b43legacy_dmaring *ring) in should_inject_overflow() argument
1050 if (unlikely(b43legacy_debug(ring->dev, in should_inject_overflow()
1056 next_overflow = ring->last_injected_overflow + HZ; in should_inject_overflow()
1058 ring->last_injected_overflow = jiffies; in should_inject_overflow()
1059 b43legacydbg(ring->dev->wl, in should_inject_overflow()
1060 "Injecting TX ring overflow on " in should_inject_overflow()
1061 "DMA controller %d\n", ring->index); in should_inject_overflow()
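should_inject_overflow() is a debug-only path that pretends the TX ring is full at most once per second, rate-limited with jiffies arithmetic (last_injected_overflow + HZ above). The rate-limit idiom, kernel-style:

#include <linux/jiffies.h>

/* Fire at most once per second, as should_inject_overflow() does. */
static bool rate_limited_event(unsigned long *last_fired)
{
	unsigned long next = *last_fired + HZ;	/* HZ jiffies == one second */

	if (time_after(jiffies, next)) {
		*last_fired = jiffies;
		return true;
	}
	return false;
}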
1072 struct b43legacy_dmaring *ring; in b43legacy_dma_tx() local
1075 ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); in b43legacy_dma_tx()
1076 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx()
1078 if (unlikely(ring->stopped)) { in b43legacy_dma_tx()
1088 if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) { in b43legacy_dma_tx()
1097 err = dma_tx_fragment(ring, &skb); in b43legacy_dma_tx()
1108 if ((free_slots(ring) < SLOTS_PER_PACKET) || in b43legacy_dma_tx()
1109 should_inject_overflow(ring)) { in b43legacy_dma_tx()
1110 /* This TX ring is full. */ in b43legacy_dma_tx()
1114 ring->stopped = true; in b43legacy_dma_tx()
1116 b43legacydbg(dev->wl, "Stopped TX ring %d\n", in b43legacy_dma_tx()
1117 ring->index); in b43legacy_dma_tx()
1125 struct b43legacy_dmaring *ring; in b43legacy_dma_handle_txstatus() local
1131 ring = parse_cookie(dev, status->cookie, &slot); in b43legacy_dma_handle_txstatus()
1132 if (unlikely(!ring)) in b43legacy_dma_handle_txstatus()
1134 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_handle_txstatus()
1136 /* Sanity check: TX packets are processed in-order on one ring. in b43legacy_dma_handle_txstatus()
1139 firstused = ring->current_slot - ring->used_slots + 1; in b43legacy_dma_handle_txstatus()
1141 firstused = ring->nr_slots + firstused; in b43legacy_dma_handle_txstatus()
1147 "ring %d. Expected %d, but got %d\n", in b43legacy_dma_handle_txstatus()
1148 ring->index, firstused, slot); in b43legacy_dma_handle_txstatus()
1153 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in b43legacy_dma_handle_txstatus()
1154 op32_idx2desc(ring, slot, &meta); in b43legacy_dma_handle_txstatus()
1157 unmap_descbuffer(ring, meta->dmaaddr, in b43legacy_dma_handle_txstatus()
1160 unmap_descbuffer(ring, meta->dmaaddr, in b43legacy_dma_handle_txstatus()
1215 ring->used_slots--; in b43legacy_dma_handle_txstatus()
1219 slot = next_slot(ring, slot); in b43legacy_dma_handle_txstatus()
1222 if (ring->stopped) { in b43legacy_dma_handle_txstatus()
1223 B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); in b43legacy_dma_handle_txstatus()
1224 ring->stopped = false; in b43legacy_dma_handle_txstatus()
1227 if (dev->wl->tx_queue_stopped[ring->queue_prio]) { in b43legacy_dma_handle_txstatus()
1228 dev->wl->tx_queue_stopped[ring->queue_prio] = 0; in b43legacy_dma_handle_txstatus()
1232 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); in b43legacy_dma_handle_txstatus()
1234 b43legacydbg(dev->wl, "Woke up TX ring %d\n", in b43legacy_dma_handle_txstatus()
1235 ring->index); in b43legacy_dma_handle_txstatus()
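b43legacy_dma_handle_txstatus() depends on TX statuses arriving in ring order, so the slot decoded from the cookie must equal the oldest in-flight slot; that slot is reconstructed from current_slot and used_slots, with a wraparound fix-up when the subtraction goes negative (the firstused lines above). As a function over the ring_state model:

/* Oldest in-flight slot: walk back used_slots - 1 entries from
 * current_slot, wrapping if the subtraction goes past slot 0. */
static int first_used_slot(const struct ring_state *r)
{
	int firstused = r->current_slot - r->used_slots + 1;

	if (firstused < 0)
		firstused += r->nr_slots;	/* wrapped past slot 0 */
	return firstused;
}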
1241 static void dma_rx(struct b43legacy_dmaring *ring, in dma_rx() argument
1252 desc = op32_idx2desc(ring, *slot, &meta); in dma_rx()
1254 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); in dma_rx()
1257 if (ring->index == 3) { in dma_rx()
1270 b43legacy_handle_hwtxstatus(ring->dev, hw); in dma_rx()
1272 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1273 ring->rx_buffersize); in dma_rx()
1289 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1290 ring->rx_buffersize); in dma_rx()
1294 if (unlikely(len > ring->rx_buffersize)) { in dma_rx()
1304 desc = op32_idx2desc(ring, *slot, &meta); in dma_rx()
1306 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1307 ring->rx_buffersize); in dma_rx()
1308 *slot = next_slot(ring, *slot); in dma_rx()
1310 tmp -= ring->rx_buffersize; in dma_rx()
1314 b43legacyerr(ring->dev->wl, "DMA RX buffer too small " in dma_rx()
1316 len, ring->rx_buffersize, cnt); in dma_rx()
1321 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); in dma_rx()
1323 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" in dma_rx()
1325 sync_descbuffer_for_device(ring, dmaaddr, in dma_rx()
1326 ring->rx_buffersize); in dma_rx()
1330 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); in dma_rx()
1331 skb_put(skb, len + ring->frameoffset); in dma_rx()
1332 skb_pull(skb, ring->frameoffset); in dma_rx()
1334 b43legacy_rx(ring->dev, skb, rxhdr); in dma_rx()
1339 void b43legacy_dma_rx(struct b43legacy_dmaring *ring) in b43legacy_dma_rx() argument
1345 B43legacy_WARN_ON(ring->tx); in b43legacy_dma_rx()
1346 current_slot = op32_get_current_rxslot(ring); in b43legacy_dma_rx()
1348 ring->nr_slots)); in b43legacy_dma_rx()
1350 slot = ring->current_slot; in b43legacy_dma_rx()
1351 for (; slot != current_slot; slot = next_slot(ring, slot)) { in b43legacy_dma_rx()
1352 dma_rx(ring, &slot); in b43legacy_dma_rx()
1353 update_max_used_slots(ring, ++used_slots); in b43legacy_dma_rx()
1355 op32_set_current_rxslot(ring, slot); in b43legacy_dma_rx()
1356 ring->current_slot = slot; in b43legacy_dma_rx()
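b43legacy_dma_rx() reads the slot the DMA engine will fill next out of RXSTATUS (op32_get_current_rxslot), consumes every slot from the driver's cursor up to but not including it, then writes the new cursor back so the hardware may reuse those buffers. The loop's shape over the ring_state model:

/* Consume RX slots between the driver's cursor and the hardware's.
 * hw_slot would come from the RXSTATUS register. */
static void drain_rx(struct ring_state *r, int hw_slot)
{
	int slot = r->current_slot;

	while (slot != hw_slot) {
		/* dma_rx(ring, &slot): sync buffer, pass skb up, re-arm slot */
		slot = next_slot(r, slot);
	}
	/* op32_set_current_rxslot(ring, slot): return the slots to hardware */
	r->current_slot = slot;
}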
1359 static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) in b43legacy_dma_tx_suspend_ring() argument
1361 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx_suspend_ring()
1362 op32_tx_suspend(ring); in b43legacy_dma_tx_suspend_ring()
1365 static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) in b43legacy_dma_tx_resume_ring() argument
1367 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx_resume_ring()
1368 op32_tx_resume(ring); in b43legacy_dma_tx_resume_ring()