/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  This file contains the functions to handle the DMA.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <asm/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"

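/* Program the AXI bus mode register: LPI options, read/write outstanding
 * request limits and the burst lengths allowed by the stmmac_axi settings.
 */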
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
	int i;

	pr_info("dwmac1000: Master AXI performs %s burst length\n",
		!(value & DMA_AXI_UNDEF) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit, the AXI master performs any of the
	 * burst lengths enabled by the programmed BLEN bits (all BLEN bits
	 * are set by default).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_AXI_BUS_MODE);
}

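/* Program the DMA bus mode register (CSR0) and enable the default set of
 * DMA interrupts.
 */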
static void dwmac1000_dma_init(void __iomem *ioaddr,
			       struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	/*
	 * Set the DMA PBL (Programmable Burst Length) mode.
	 *
	 * Note: before stmmac core 3.50 this mode bit selected 4xPBL;
	 * from 3.50 onward it selects 8xPBL.
	 */
	if (dma_cfg->pblx8)
		value |= DMA_BUS_MODE_MAXPBL;
	value |= DMA_BUS_MODE_USP;
	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_BUS_MODE_MB;

	if (atds)
		value |= DMA_BUS_MODE_ATDS;

	if (dma_cfg->aal)
		value |= DMA_BUS_MODE_AAL;

	writel(value, ioaddr + DMA_BUS_MODE);

	/* Write the default interrupt mask to CSR7 (Interrupt Enable Register) */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}

static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  u32 dma_rx_phy, u32 chan)
{
	/* RX descriptor base address list must be written into DMA CSR3 */
	writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
}

static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  u32 dma_tx_phy, u32 chan)
{
	/* TX descriptor base address list must be written into DMA CSR4 */
	writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}

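/* Compute the RX flow control fields (EFC, RFA, RFD) of the operation mode
 * register based on the RX FIFO size.
 */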
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
{
	csr6 &= ~DMA_CONTROL_RFA_MASK;
	csr6 &= ~DMA_CONTROL_RFD_MASK;

	/* Leave flow control disabled if the receive FIFO size is less than
	 * 4K or is zero. Otherwise, send XOFF when the FIFO is 1K less than
	 * full, and send XON when it is 2K less than full.
	 */
	if (rxfifosz < 4096) {
		csr6 &= ~DMA_CONTROL_EFC;
		pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
			 rxfifosz);
	} else {
		csr6 |= DMA_CONTROL_EFC;
		csr6 |= RFA_FULL_MINUS_1K;
		csr6 |= RFD_FULL_MINUS_2K;
	}
	return csr6;
}

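/* Program the RX operating mode: store-and-forward or a cut-through
 * threshold, plus flow control derived from the RX FIFO size.
 */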
static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		csr6 |= DMA_CONTROL_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		csr6 &= ~DMA_CONTROL_RSF;
		csr6 &= DMA_CONTROL_TC_RX_MASK;
		if (mode <= 32)
			csr6 |= DMA_CONTROL_RTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_RTC_64;
		else if (mode <= 96)
			csr6 |= DMA_CONTROL_RTC_96;
		else
			csr6 |= DMA_CONTROL_RTC_128;
	}

	/* Configure flow control based on rx fifo size */
	csr6 = dwmac1000_configure_fc(csr6, fifosz);

	writel(csr6, ioaddr + DMA_CONTROL);
}

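/* Program the TX operating mode: store-and-forward (with operate-on-second-
 * frame) or a transmit threshold.
 */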
static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		csr6 |= DMA_CONTROL_TSF;
		/* Operating on the second frame increases performance,
		 * especially when transmit store-and-forward is used.
		 */
		csr6 |= DMA_CONTROL_OSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		csr6 &= ~DMA_CONTROL_TSF;
		csr6 &= DMA_CONTROL_TC_TX_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			csr6 |= DMA_CONTROL_TTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_TTC_64;
		else if (mode <= 128)
			csr6 |= DMA_CONTROL_TTC_128;
		else if (mode <= 192)
			csr6 |= DMA_CONTROL_TTC_192;
		else
			csr6 |= DMA_CONTROL_TTC_256;
	}

	writel(csr6, ioaddr + DMA_CONTROL);
}

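/* Dump the DMA CSR space into reg_space; registers 12..17 are skipped
 * (a reserved range in this register map).
 */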
static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
		if ((i < 12) || (i > 17))
			reg_space[DMA_BUS_MODE / 4 + i] =
				readl(ioaddr + DMA_BUS_MODE + i * 4);
}

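/* Read the HW feature register and report which optional features this
 * instance of the core was synthesized with.
 */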
static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
				     struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);

	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
	dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
	dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
	dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
	dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
	dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
	/* MMC */
	dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
	/* IEEE 1588-2002 */
	dma_cap->time_stamp = (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
	dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
	dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
	dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
	dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
	/* TX and RX number of channels */
	dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
	dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
	/* Alternate (enhanced) DESC mode */
	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}

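/* Program the RX interrupt watchdog timer (RIWT), used to coalesce receive
 * interrupts.
 */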
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
				  u32 number_chan)
{
	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}

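/* DMA operations exported to the stmmac core for the dwmac1000 (GMAC) */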
const struct stmmac_dma_ops dwmac1000_dma_ops = {
	.reset = dwmac_dma_reset,
	.init = dwmac1000_dma_init,
	.init_rx_chan = dwmac1000_dma_init_rx,
	.init_tx_chan = dwmac1000_dma_init_tx,
	.axi = dwmac1000_dma_axi,
	.dump_regs = dwmac1000_dump_dma_regs,
	.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
	.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
	.get_hw_feature = dwmac1000_get_hw_feature,
	.rx_watchdog = dwmac1000_rx_watchdog,
};