/**
 * \file
 *
 * \brief GMAC (Ethernet MAC) driver for SAM.
 *
 * Copyright (c) 2013 Atmel Corporation. All rights reserved.
 *
 * \asf_license_start
 *
 * \page License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. The name of Atmel may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * 4. This software may only be redistributed and used in connection with an
 * Atmel microcontroller product.
 *
 * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
 * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * \asf_license_stop
 *
 */

/* Standard includes. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"

#include "FreeRTOSIPConfig.h"

#include "compiler.h"
#include "instance/gmac.h"
#include "ethernet_phy.h"

/*/ @cond 0 */
/**INDENT-OFF**/
#ifdef __cplusplus
extern "C" {
#endif
/**INDENT-ON**/
/*/ @endcond */

#ifndef ARRAY_SIZE
    #define ARRAY_SIZE( x )    ( int ) ( sizeof( x ) / sizeof( x )[ 0 ] )
#endif

/**
 * \defgroup gmac_group Ethernet Media Access Controller
 *
 * See \ref gmac_quickstart.
 *
 * Driver for the GMAC (Ethernet Media Access Controller).
 * This file contains basic functions for the GMAC, with support for all modes, settings
 * and clock speeds.
 *
 * \section dependencies Dependencies
 * This driver does not depend on other modules.
 *
 * @{
 */

/** TX descriptor lists */
COMPILER_ALIGNED( 8 )
static gmac_tx_descriptor_t gs_tx_desc[ GMAC_TX_BUFFERS ];
#if ( GMAC_USES_TX_CALLBACK != 0 )
    /** TX callback lists */
    static gmac_dev_tx_cb_t gs_tx_callback[ GMAC_TX_BUFFERS ];
#endif
/** RX descriptors lists */
COMPILER_ALIGNED( 8 )
static gmac_rx_descriptor_t gs_rx_desc[ GMAC_RX_BUFFERS ];

#if ( ipconfigZERO_COPY_TX_DRIVER == 0 )

    /** Send Buffer. Section 3.6 of AMBA 2.0 spec states that burst should not cross the
     * 1K Boundaries. Receive buffer manager write operations are burst of 2 words => 3 lsb bits
     * of the address shall be set to 0.
     */
    COMPILER_ALIGNED( 8 )
    static uint8_t gs_uc_tx_buffer[ GMAC_TX_BUFFERS * GMAC_TX_UNITSIZE ];
#endif /* ipconfigZERO_COPY_TX_DRIVER */

/** Receive Buffer */
COMPILER_ALIGNED( 8 )
static uint8_t gs_uc_rx_buffer[ GMAC_RX_BUFFERS * GMAC_RX_UNITSIZE ];

/**
 * GMAC device memory management struct.
 */
typedef struct gmac_dev_mem
{
    /* Pointer to allocated buffer for RX. The address should be 8-byte aligned
     * and the size should be GMAC_RX_UNITSIZE * wRxSize. */
    uint8_t * p_rx_buffer;
    /* Pointer to allocated RX descriptor list. */
    gmac_rx_descriptor_t * p_rx_dscr;
    /* RX size, in number of registered units (RX descriptors). */
    /* Increased size from 16- to 32-bits, because it's more efficient */
    uint32_t us_rx_size;

    /* Pointer to allocated buffer for TX. The address should be 8-byte aligned
     * and the size should be GMAC_TX_UNITSIZE * wTxSize. */
    uint8_t * p_tx_buffer;
    /* Pointer to allocated TX descriptor list. */
    gmac_tx_descriptor_t * p_tx_dscr;
    /* TX size, in number of registered units (TX descriptors). */
    uint32_t us_tx_size;
} gmac_dev_mem_t;

/** Return count in buffer */
#define CIRC_CNT( head, tail, size )      ( ( ( head ) - ( tail ) ) % ( size ) )

/*
 * Return space available, from 0 to size-1.
 * Always leave one free char as a completely full buffer that has (head == tail),
 * which is the same as empty.
 */
#define CIRC_SPACE( head, tail, size )    CIRC_CNT( ( tail ), ( ( head ) + 1 ), ( size ) )

/** Circular buffer is empty ? */
#define CIRC_EMPTY( head, tail )          ( head == tail )
/** Clear circular buffer */
#define CIRC_CLEAR( head, tail )          do { ( head ) = 0; ( tail ) = 0; } while( ipFALSE_BOOL )
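
/* Worked example: with size 4, head 3 and tail 1, CIRC_CNT() yields
 * ( 3 - 1 ) % 4 = 2 pending entries.  'head == tail' means the ring is empty,
 * and one slot is always kept free so a completely full ring can be told
 * apart from an empty one. */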

/** Increment head or tail */
static __inline void circ_inc32( int32_t * lHeadOrTail,
                                 uint32_t ulSize )
{
    ( *lHeadOrTail )++;

    if( ( *lHeadOrTail ) >= ( int32_t ) ulSize )
    {
        ( *lHeadOrTail ) = 0;
    }
}

/**
 * \brief Wait for a PHY maintenance operation to complete.
 *
 * \param p_gmac HW controller address.
 * \param ul_retry Maximum number of polling retries; 0 to wait forever until completion.
 *
 * \return GMAC_OK if the operation completed successfully, GMAC_TIMEOUT otherwise.
 */
static uint8_t gmac_wait_phy( Gmac * p_gmac,
                              const uint32_t ul_retry )
{
    volatile uint32_t ul_retry_count = 0;
    const uint32_t xPHYPollDelay = pdMS_TO_TICKS( 1ul );

    while( !gmac_is_phy_idle( p_gmac ) )
    {
        if( ul_retry == 0 )
        {
            continue;
        }

        ul_retry_count++;

        if( ul_retry_count >= ul_retry )
        {
            return GMAC_TIMEOUT;
        }

        /* Block the task to allow other tasks to execute while the PHY
         * is not connected. */
        vTaskDelay( xPHYPollDelay );
    }

    return GMAC_OK;
}

/**
 * \brief Disable transfer, reset registers and descriptor lists.
 *
 * \param p_dev Pointer to GMAC driver instance.
 *
 */
static void gmac_reset_tx_mem( gmac_device_t * p_dev )
{
    Gmac * p_hw = p_dev->p_hw;
    uint8_t * p_tx_buff = p_dev->p_tx_buffer;
    gmac_tx_descriptor_t * p_td = p_dev->p_tx_dscr;

    uint32_t ul_index;
    uint32_t ul_address;

    /* Disable TX */
    gmac_enable_transmit( p_hw, 0 );

    /* Set up the TX descriptors */
    CIRC_CLEAR( p_dev->l_tx_head, p_dev->l_tx_tail );

    for( ul_index = 0; ul_index < p_dev->ul_tx_list_size; ul_index++ )
    {
        #if ( ipconfigZERO_COPY_TX_DRIVER != 0 )
            {
                ul_address = ( uint32_t ) 0u;
            }
        #else
            {
                ul_address = ( uint32_t ) ( &( p_tx_buff[ ul_index * GMAC_TX_UNITSIZE ] ) );
            }
        #endif /* ipconfigZERO_COPY_TX_DRIVER */
        p_td[ ul_index ].addr = ul_address;
        p_td[ ul_index ].status.val = GMAC_TXD_USED;
    }

    p_td[ p_dev->ul_tx_list_size - 1 ].status.val =
        GMAC_TXD_USED | GMAC_TXD_WRAP;

    /* Set transmit buffer queue */
    gmac_set_tx_queue( p_hw, ( uint32_t ) p_td );
}

/**
 * \brief Disable receiver, reset registers and descriptor list.
 *
 * \param p_dev Pointer to GMAC Driver instance.
 */
static void gmac_reset_rx_mem( gmac_device_t * p_dev )
{
    Gmac * p_hw = p_dev->p_hw;
    uint8_t * p_rx_buff = p_dev->p_rx_buffer;
    gmac_rx_descriptor_t * pRd = p_dev->p_rx_dscr;

    uint32_t ul_index;
    uint32_t ul_address;

    /* Disable RX */
    gmac_enable_receive( p_hw, 0 );

    /* Set up the RX descriptors */
    p_dev->ul_rx_idx = 0;

    for( ul_index = 0; ul_index < p_dev->ul_rx_list_size; ul_index++ )
    {
        ul_address = ( uint32_t ) ( &( p_rx_buff[ ul_index * GMAC_RX_UNITSIZE ] ) );
        pRd[ ul_index ].addr.val = ul_address & GMAC_RXD_ADDR_MASK;
        pRd[ ul_index ].status.val = 0;
    }

    pRd[ p_dev->ul_rx_list_size - 1 ].addr.val |= GMAC_RXD_WRAP;

    /* Set receive buffer queue */
    gmac_set_rx_queue( p_hw, ( uint32_t ) pRd );
}


/**
 * \brief Initialize the allocated buffer lists for the GMAC driver to transfer data.
 * Must be invoked after gmac_dev_init() but before RX/TX starts.
 *
 * \note If an input address is not 8-byte aligned, the address is automatically
 * adjusted and the list size is reduced by one.
 *
 * \param p_gmac Pointer to GMAC instance.
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param p_dev_mm Pointer to the GMAC memory management control block.
 * \param p_tx_cb Pointer to allocated TX callback list (only present when GMAC_USES_TX_CALLBACK is non-zero).
 *
 * \return GMAC_OK or GMAC_PARAM.
 */
static uint8_t gmac_init_mem( Gmac * p_gmac,
                              gmac_device_t * p_gmac_dev,
                              gmac_dev_mem_t * p_dev_mm
                              #if ( GMAC_USES_TX_CALLBACK != 0 )
                                  ,
                                  gmac_dev_tx_cb_t * p_tx_cb
                              #endif
                              )
{
    if( ( p_dev_mm->us_rx_size <= 1 ) || p_dev_mm->us_tx_size <= 1
        #if ( GMAC_USES_TX_CALLBACK != 0 )
            || p_tx_cb == NULL
        #endif
        )
    {
        return GMAC_PARAM;
    }

    /* Assign RX buffers */
    if( ( ( uint32_t ) p_dev_mm->p_rx_buffer & 0x7 ) ||
        ( ( uint32_t ) p_dev_mm->p_rx_dscr & 0x7 ) )
    {
        p_dev_mm->us_rx_size--;
    }

    p_gmac_dev->p_rx_buffer =
        ( uint8_t * ) ( ( uint32_t ) p_dev_mm->p_rx_buffer & 0xFFFFFFF8 );
    p_gmac_dev->p_rx_dscr =
        ( gmac_rx_descriptor_t * ) ( ( uint32_t ) p_dev_mm->p_rx_dscr
                                     & 0xFFFFFFF8 );
    p_gmac_dev->ul_rx_list_size = p_dev_mm->us_rx_size;

    /* Assign TX buffers */
    if( ( ( uint32_t ) p_dev_mm->p_tx_buffer & 0x7 ) ||
        ( ( uint32_t ) p_dev_mm->p_tx_dscr & 0x7 ) )
    {
        p_dev_mm->us_tx_size--;
    }

    p_gmac_dev->p_tx_buffer =
        ( uint8_t * ) ( ( uint32_t ) p_dev_mm->p_tx_buffer & 0xFFFFFFF8 );
    p_gmac_dev->p_tx_dscr =
        ( gmac_tx_descriptor_t * ) ( ( uint32_t ) p_dev_mm->p_tx_dscr
                                     & 0xFFFFFFF8 );
    p_gmac_dev->ul_tx_list_size = p_dev_mm->us_tx_size;
    #if ( GMAC_USES_TX_CALLBACK != 0 )
        p_gmac_dev->func_tx_cb_list = p_tx_cb;
    #endif
    /* Reset TX & RX */
    gmac_reset_rx_mem( p_gmac_dev );
    gmac_reset_tx_mem( p_gmac_dev );

    /* Enable Rx and Tx, plus the statistics register */
    gmac_enable_transmit( p_gmac, true );
    gmac_enable_receive( p_gmac, true );
    gmac_enable_statistics_write( p_gmac, true );

    /* Set up the interrupts for transmission and errors */
    gmac_enable_interrupt( p_gmac,
                           GMAC_IER_RXUBR | /* Enable receive used bit read interrupt. */
                           GMAC_IER_TUR |   /* Enable transmit underrun interrupt. */
                           GMAC_IER_RLEX |  /* Enable retry limit exceeded interrupt. */
                           GMAC_IER_TFC |   /* Enable transmit buffers exhausted in mid-frame interrupt. */
                           GMAC_IER_TCOMP | /* Enable transmit complete interrupt. */
                           GMAC_IER_ROVR |  /* Enable receive overrun interrupt. */
                           GMAC_IER_HRESP | /* Enable Hresp not OK interrupt. */
                           GMAC_IER_PFNZ |  /* Enable pause frame received interrupt. */
                           GMAC_IER_PTZ );  /* Enable pause time zero interrupt. */

    return GMAC_OK;
}

/**
 * \brief Read a PHY register.
 *
 * \param p_gmac Pointer to the GMAC instance.
 * \param uc_phy_address PHY address.
 * \param uc_address Register address.
 * \param p_value Pointer to a 32-bit location to store read data.
 *
 * \return GMAC_OK on success, GMAC_TIMEOUT on timeout.
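 *
 * A minimal usage sketch, assuming the PHY management port has already been
 * enabled; BOARD_GMAC_PHY_ADDR and the use of register 0 (BMCR) are
 * assumptions made for this example, not something defined by this driver:
 * \code
 * uint32_t ul_bmcr;
 *
 * if( gmac_phy_read( GMAC, BOARD_GMAC_PHY_ADDR, 0, &ul_bmcr ) == GMAC_OK )
 * {
 *     // The lower 16 bits of ul_bmcr now hold the register value.
 * }
 * \endcode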
 */
uint8_t gmac_phy_read( Gmac * p_gmac,
                       uint8_t uc_phy_address,
                       uint8_t uc_address,
                       uint32_t * p_value )
{
    gmac_maintain_phy( p_gmac, uc_phy_address, uc_address, 1, 0 );

    if( gmac_wait_phy( p_gmac, MAC_PHY_RETRY_MAX ) == GMAC_TIMEOUT )
    {
        return GMAC_TIMEOUT;
    }

    *p_value = gmac_get_phy_data( p_gmac );
    return GMAC_OK;
}

/**
 * \brief Write a PHY register.
 *
 * \param p_gmac Pointer to the GMAC instance.
 * \param uc_phy_address PHY Address.
 * \param uc_address Register Address.
 * \param ul_value Data to write; only the lower 16 bits are used.
 *
 * \return GMAC_OK on success, GMAC_TIMEOUT on timeout.
 */
uint8_t gmac_phy_write( Gmac * p_gmac,
                        uint8_t uc_phy_address,
                        uint8_t uc_address,
                        uint32_t ul_value )
{
    gmac_maintain_phy( p_gmac, uc_phy_address, uc_address, 0, ul_value );

    if( gmac_wait_phy( p_gmac, MAC_PHY_RETRY_MAX ) == GMAC_TIMEOUT )
    {
        return GMAC_TIMEOUT;
    }

    return GMAC_OK;
}

/**
 * \brief Initialize the GMAC driver.
 *
 * \param p_gmac Pointer to the GMAC instance.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_opt GMAC configuration options.
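 *
 * A minimal initialization sketch; the 'gmac_option' variable, 'gs_gmac_dev'
 * and 'ucMACAddress' are assumptions made for this example, not names defined
 * by this driver:
 * \code
 * gmac_options_t gmac_option;
 *
 * memset( &gmac_option, 0, sizeof( gmac_option ) );
 * gmac_option.uc_copy_all_frame = 0;
 * gmac_option.uc_no_boardcast = 0;
 * memcpy( gmac_option.uc_mac_addr, ucMACAddress, sizeof( gmac_option.uc_mac_addr ) );
 *
 * gs_gmac_dev.p_hw = GMAC;
 * gmac_dev_init( GMAC, &gs_gmac_dev, &gmac_option );
 * \endcode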
 */
void gmac_dev_init( Gmac * p_gmac,
                    gmac_device_t * p_gmac_dev,
                    gmac_options_t * p_opt )
{
    gmac_dev_mem_t gmac_dev_mm;

    /* Disable TX & RX and more */
    gmac_network_control( p_gmac, 0 );
    gmac_disable_interrupt( p_gmac, ~0u );


    gmac_clear_statistics( p_gmac );

    /* Clear all status bits in the receive status register. */
    gmac_clear_rx_status( p_gmac, GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA );

    /* Clear all status bits in the transmit status register */
    gmac_clear_tx_status( p_gmac, GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE
                          | GMAC_TSR_TFC | GMAC_TSR_TXCOMP | GMAC_TSR_UND );

    /* Clear interrupts */
    gmac_get_interrupt_status( p_gmac );
    #if !defined( ETHERNET_CONF_DATA_OFFSET )

        /* Receive Buffer Offset
         * Indicates the number of bytes by which the received data
         * is offset from the start of the receive buffer
         * which can be handy for alignment reasons */
        /* Note: FreeRTOS+TCP wants to have this offset set to 2 bytes */
        #error ETHERNET_CONF_DATA_OFFSET not defined, assuming 0
    #endif

    /* Enable copying of received data into the RX buffers, ignore broadcasts,
     * and do not copy the FCS. */

    gmac_set_configure( p_gmac,
                        ( gmac_get_configure( p_gmac ) & ~GMAC_NCFGR_RXBUFO_Msk ) |
                        GMAC_NCFGR_RFCS |   /* Remove FCS, frame check sequence (last 4 bytes) */
                        GMAC_NCFGR_PEN |    /* Pause Enable */
                        GMAC_NCFGR_RXBUFO( ETHERNET_CONF_DATA_OFFSET ) |
                        GMAC_RXD_RXCOEN );

    /*
     * GMAC_DCFGR_TXCOEN: (GMAC_DCFGR) Transmitter Checksum Generation Offload Enable.
     * Note that the SAM4E does have RX checksum offloading,
     * but TX checksum offloading has NOT been implemented.
     */

    gmac_set_dma( p_gmac,
                  gmac_get_dma( p_gmac ) | GMAC_DCFGR_TXCOEN );

    gmac_enable_copy_all( p_gmac, p_opt->uc_copy_all_frame );
    gmac_disable_broadcast( p_gmac, p_opt->uc_no_boardcast );

    /* Fill in GMAC device memory management */
    gmac_dev_mm.p_rx_buffer = gs_uc_rx_buffer;
    gmac_dev_mm.p_rx_dscr = gs_rx_desc;
    gmac_dev_mm.us_rx_size = GMAC_RX_BUFFERS;

    #if ( ipconfigZERO_COPY_TX_DRIVER != 0 )
        {
            gmac_dev_mm.p_tx_buffer = NULL;
        }
    #else
        {
            gmac_dev_mm.p_tx_buffer = gs_uc_tx_buffer;
        }
    #endif
    gmac_dev_mm.p_tx_dscr = gs_tx_desc;
    gmac_dev_mm.us_tx_size = GMAC_TX_BUFFERS;

    gmac_init_mem( p_gmac, p_gmac_dev, &gmac_dev_mm
                   #if ( GMAC_USES_TX_CALLBACK != 0 )
                       , gs_tx_callback
                   #endif
                   );

    gmac_set_address( p_gmac, 0, p_opt->uc_mac_addr );
}

/**
 * \brief Frames can be read from the GMAC in multiple sections.
 *
 * Returns > 0 if a complete frame is available.
 * It also cleans up incomplete older frames.
 */

static uint32_t gmac_dev_poll( gmac_device_t * p_gmac_dev )
{
    uint32_t ulReturn = 0;
    int32_t ulIndex = p_gmac_dev->ul_rx_idx;
    gmac_rx_descriptor_t * pxHead = &p_gmac_dev->p_rx_dscr[ ulIndex ];

    /* Discard any incomplete frames */
    while( ( pxHead->addr.val & GMAC_RXD_OWNERSHIP ) &&
           ( pxHead->status.val & GMAC_RXD_SOF ) == 0 )
    {
        pxHead->addr.val &= ~( GMAC_RXD_OWNERSHIP );
        circ_inc32( &ulIndex, p_gmac_dev->ul_rx_list_size );
        pxHead = &p_gmac_dev->p_rx_dscr[ ulIndex ];
        p_gmac_dev->ul_rx_idx = ulIndex;
        #if ( GMAC_STATS != 0 )
            {
                gmacStats.incompCount++;
            }
        #endif
    }

    while( ( pxHead->addr.val & GMAC_RXD_OWNERSHIP ) != 0 )
    {
        if( ( pxHead->status.val & GMAC_RXD_EOF ) != 0 )
        {
            /* Here a complete frame has been seen with SOF and EOF */
            ulReturn = pxHead->status.bm.len;
            break;
        }

        circ_inc32( &ulIndex, p_gmac_dev->ul_rx_list_size );
        pxHead = &p_gmac_dev->p_rx_dscr[ ulIndex ];

        if( ( pxHead->addr.val & GMAC_RXD_OWNERSHIP ) == 0 )
        {
            /* CPU is not the owner (yet) */
            break;
        }

        if( ( pxHead->status.val & GMAC_RXD_SOF ) != 0 )
        {
            /* Strange, we found a new Start Of Frame,
             * discard previous segments */
            int32_t ulPrev = p_gmac_dev->ul_rx_idx;
            pxHead = &p_gmac_dev->p_rx_dscr[ ulPrev ];

            do
            {
                pxHead->addr.val &= ~( GMAC_RXD_OWNERSHIP );
                circ_inc32( &ulPrev, p_gmac_dev->ul_rx_list_size );
                pxHead = &p_gmac_dev->p_rx_dscr[ ulPrev ];
                #if ( GMAC_STATS != 0 )
                    {
                        gmacStats.truncCount++;
                    }
                #endif
            } while( ulPrev != ulIndex );

            p_gmac_dev->ul_rx_idx = ulIndex;
        }
    }

    return ulReturn;
}

/**
 * \brief Frames can be read from the GMAC in multiple sections.
 * Read ul_frame_size bytes from the GMAC receive buffers to p_frame.
 * p_rcv_size is the size of the entire frame. Generally gmac_dev_read()
 * will be called repeatedly until the sum of all the ul_frame_size values
 * equals the value of p_rcv_size.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_frame Address of the frame buffer.
 * \param ul_frame_size Length of the frame.
 * \param p_rcv_size Received frame size.
 *
 * \return GMAC_OK if a frame has been received successfully, GMAC_RX_NULL otherwise.
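 *
 * A minimal usage sketch; 'gs_gmac_dev' and 'ucFrameBuffer' are assumptions
 * made for this example, not names defined by the driver:
 * \code
 * static uint8_t ucFrameBuffer[ 1536 ];
 * uint32_t ul_frame_length;
 *
 * if( gmac_dev_read( &gs_gmac_dev, ucFrameBuffer, sizeof( ucFrameBuffer ),
 *                    &ul_frame_length ) == GMAC_OK )
 * {
 *     // ul_frame_length bytes have been copied into ucFrameBuffer.
 * }
 * \endcode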
 */
uint32_t gmac_dev_read( gmac_device_t * p_gmac_dev,
                        uint8_t * p_frame,
                        uint32_t ul_frame_size,
                        uint32_t * p_rcv_size )
{
    int32_t nextIdx; /* A copy of the Rx-index 'ul_rx_idx' */
    int32_t bytesLeft = gmac_dev_poll( p_gmac_dev );
    gmac_rx_descriptor_t * pxHead;

    if( bytesLeft == 0 )
    {
        return GMAC_RX_NULL;
    }

    /* gmac_dev_poll has confirmed that there is a complete frame at
     * the current position 'ul_rx_idx'
     */
    nextIdx = p_gmac_dev->ul_rx_idx;

    /* Read +2 bytes because buffers are aligned at -2 bytes */
    bytesLeft = min( bytesLeft + 2, ( int32_t ) ul_frame_size );

    /* The frame will be copied in 1 or 2 memcpy's */
    if( ( p_frame != NULL ) && ( bytesLeft != 0 ) )
    {
        const uint8_t * source;
        int32_t left;
        int32_t toCopy;

        source = p_gmac_dev->p_rx_buffer + nextIdx * GMAC_RX_UNITSIZE;
        left = bytesLeft;
        toCopy = ( p_gmac_dev->ul_rx_list_size - nextIdx ) * GMAC_RX_UNITSIZE;

        if( toCopy > left )
        {
            toCopy = left;
        }

        memcpy( p_frame, source, toCopy );
        left -= toCopy;

        if( left != 0ul )
        {
            memcpy( p_frame + toCopy, ( void * ) p_gmac_dev->p_rx_buffer, left );
        }
    }

    do
    {
        pxHead = &p_gmac_dev->p_rx_dscr[ nextIdx ];
        pxHead->addr.val &= ~( GMAC_RXD_OWNERSHIP );
        circ_inc32( &nextIdx, p_gmac_dev->ul_rx_list_size );
    } while( ( pxHead->status.val & GMAC_RXD_EOF ) == 0 );

    p_gmac_dev->ul_rx_idx = nextIdx;

    *p_rcv_size = bytesLeft;

    return GMAC_OK;
}


extern void vGMACGenerateChecksum( uint8_t * apBuffer );

/**
 * \brief Send ul_size bytes from p_buffer. This copies the buffer to one of the
 * GMAC TX buffers, and then indicates to the GMAC that the buffer is ready.
 * The buffer is expected to hold a complete Ethernet frame, so it is queued as
 * the last buffer of the frame and can be transmitted immediately.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_buffer Pointer to the data buffer.
 * \param ul_size Length of the frame.
 * \param func_tx_cb Transmit callback function.
 *
 * \return GMAC_OK on success, GMAC_PARAM or GMAC_TX_BUSY on failure.
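 *
 * A minimal usage sketch; 'gs_gmac_dev', 'pucFrame' and 'uxFrameLength' are
 * assumptions made for this example:
 * \code
 * if( gmac_dev_write( &gs_gmac_dev, pucFrame, uxFrameLength, NULL ) != GMAC_OK )
 * {
 *     // Either the frame was too long (GMAC_PARAM) or all TX descriptors
 *     // were in use (GMAC_TX_BUSY): try again later, or register a TX
 *     // wakeup callback.
 * }
 * \endcode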
 */
uint32_t gmac_dev_write( gmac_device_t * p_gmac_dev,
                         void * p_buffer,
                         uint32_t ul_size,
                         gmac_dev_tx_cb_t func_tx_cb )
{
    volatile gmac_tx_descriptor_t * p_tx_td;

    #if ( GMAC_USES_TX_CALLBACK != 0 )
        volatile gmac_dev_tx_cb_t * p_func_tx_cb;
    #endif

    Gmac * p_hw = p_gmac_dev->p_hw;

    #if ( GMAC_USES_TX_CALLBACK == 0 )
        ( void ) func_tx_cb;
    #endif

    /* Check parameter */
    if( ul_size > GMAC_TX_UNITSIZE )
    {
        return GMAC_PARAM;
    }

    /* Pointers to the current transmit descriptor */
    p_tx_td = &p_gmac_dev->p_tx_dscr[ p_gmac_dev->l_tx_head ];

    /* If no free TxTd, buffer can't be sent, schedule the wakeup callback */
    /* if (CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail, */
    /* p_gmac_dev->ul_tx_list_size) == 0) */
    {
        if( ( p_tx_td->status.val & GMAC_TXD_USED ) == 0 )
        {
            return GMAC_TX_BUSY;
        }
    }
    #if ( GMAC_USES_TX_CALLBACK != 0 )
        /* Pointers to the current Tx callback */
        p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[ p_gmac_dev->l_tx_head ];
    #endif

    /* Set up/copy data to transmission buffer */
    if( p_buffer && ul_size )
    {
        /* Driver manages the ring buffer */

        /* Calculating the checksum here is faster than calculating it from the GMAC buffer
         * because within p_buffer, it is well aligned */
        #if ( ipconfigZERO_COPY_TX_DRIVER != 0 )
            {
                /* Zero-copy... */
                p_tx_td->addr = ( uint32_t ) p_buffer;
            }
        #else
            {
                /* Or Memcopy... */
                memcpy( ( void * ) p_tx_td->addr, p_buffer, ul_size );
            }
        #endif /* ipconfigZERO_COPY_TX_DRIVER */
        vGMACGenerateChecksum( ( uint8_t * ) p_tx_td->addr );
    }

    #if ( GMAC_USES_TX_CALLBACK != 0 )
        /* Tx callback */
        *p_func_tx_cb = func_tx_cb;
    #endif

    /* Update transmit descriptor status */

    /* The buffer size defined is the length of ethernet frame,
     * so it's always the last buffer of the frame. */
    if( p_gmac_dev->l_tx_head == ( int32_t ) ( p_gmac_dev->ul_tx_list_size - 1 ) )
    {
        /* No need to 'and' with GMAC_TXD_LEN_MASK because ul_size has been checked */
        p_tx_td->status.val =
            ul_size | GMAC_TXD_LAST | GMAC_TXD_WRAP;
    }
    else
    {
        p_tx_td->status.val =
            ul_size | GMAC_TXD_LAST;
    }

    circ_inc32( &p_gmac_dev->l_tx_head, p_gmac_dev->ul_tx_list_size );

    /* Now start to transmit if it is still not done */
    gmac_start_transmission( p_hw );

    return GMAC_OK;
}

/**
 * \brief Get the current transmit load.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 *
 * \return Current transmit load, i.e. the number of TX descriptors in use.
 */
#if ( GMAC_USES_TX_CALLBACK != 0 )
    /* Without defining GMAC_USES_TX_CALLBACK, l_tx_tail won't be updated */
    uint32_t gmac_dev_get_tx_load( gmac_device_t * p_gmac_dev )
    {
        uint16_t us_head = p_gmac_dev->l_tx_head;
        uint16_t us_tail = p_gmac_dev->l_tx_tail;

        return CIRC_CNT( us_head, us_tail, p_gmac_dev->ul_tx_list_size );
    }
#endif

/**
 * \brief Register/Clear the RX callback. The callback will be invoked after the
 * next received frame.
 *
 * When gmac_dev_read() returns GMAC_RX_NULL, the application task calls
 * gmac_dev_set_rx_callback() to register the func_rx_cb() callback and enters a
 * suspended state. The callback is in charge of resuming the task once a new
 * frame has been received. The next time gmac_dev_read() is called, it will be
 * successful.
 *
 * This function is usually invoked from the RX callback itself with a NULL
 * callback, to unregister. Once the callback has resumed the application task,
 * there is no need to invoke the callback again.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param func_rx_cb Receive callback function.
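 *
 * A hypothetical sketch; 'prvRxCallback', 'xRxSemaphore' and 'gs_gmac_dev' are
 * assumptions made for this example. Note that the callback runs from the GMAC
 * interrupt handler, so only "FromISR" APIs may be used inside it:
 * \code
 * static void prvRxCallback( uint32_t ul_status )
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     ( void ) ul_status;
 *     xSemaphoreGiveFromISR( xRxSemaphore, &xHigherPriorityTaskWoken );
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 *
 * gmac_dev_set_rx_callback( &gs_gmac_dev, prvRxCallback );
 * \endcode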
 */
void gmac_dev_set_rx_callback( gmac_device_t * p_gmac_dev,
                               gmac_dev_rx_cb_t func_rx_cb )
{
    Gmac * p_hw = p_gmac_dev->p_hw;

    if( func_rx_cb == NULL )
    {
        gmac_disable_interrupt( p_hw, GMAC_IDR_RCOMP );
        p_gmac_dev->func_rx_cb = NULL;
    }
    else
    {
        p_gmac_dev->func_rx_cb = func_rx_cb;
        gmac_enable_interrupt( p_hw, GMAC_IER_RCOMP );
    }
}

/**
 * \brief Register/Clear the TX wakeup callback.
 *
 * When gmac_dev_write() returns GMAC_TX_BUSY (all transmit descriptors busy), the
 * application task calls gmac_dev_set_tx_wakeup_callback() to register the
 * func_wakeup_cb() callback and enters a suspended state. The callback is in
 * charge of resuming the task once a number of transmit descriptors have been
 * released. The next time gmac_dev_write() is called, it will be successful.
 *
 * This function is usually invoked with a NULL callback from the TX wakeup
 * callback itself, to unregister. Once the callback has resumed the
 * application task, there is no need to invoke the callback again.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param func_wakeup_cb Pointer to wakeup callback function.
 * \param uc_threshold Number of free transmit descriptors required before the wakeup callback is invoked.
 *
 * \return GMAC_OK, or GMAC_PARAM on parameter error.
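 *
 * A hypothetical sketch; 'prvTxWakeupCallback', 'gs_gmac_dev' and the threshold
 * of 2 free descriptors are assumptions made for this example:
 * \code
 * gmac_dev_set_tx_wakeup_callback( &gs_gmac_dev, prvTxWakeupCallback, 2 );
 * \endcode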
 */
#if ( GMAC_USES_WAKEUP_CALLBACK )
    uint8_t gmac_dev_set_tx_wakeup_callback( gmac_device_t * p_gmac_dev,
                                             gmac_dev_wakeup_cb_t func_wakeup_cb,
                                             uint8_t uc_threshold )
    {
        if( func_wakeup_cb == NULL )
        {
            p_gmac_dev->func_wakeup_cb = NULL;
        }
        else
        {
            if( uc_threshold <= p_gmac_dev->ul_tx_list_size )
            {
                p_gmac_dev->func_wakeup_cb = func_wakeup_cb;
                p_gmac_dev->uc_wakeup_threshold = uc_threshold;
            }
            else
            {
                return GMAC_PARAM;
            }
        }

        return GMAC_OK;
    }
#endif /* GMAC_USES_WAKEUP_CALLBACK */

/**
 * \brief Reset TX & RX queue & statistics.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 */
void gmac_dev_reset( gmac_device_t * p_gmac_dev )
{
    Gmac * p_hw = p_gmac_dev->p_hw;

    gmac_reset_rx_mem( p_gmac_dev );
    gmac_reset_tx_mem( p_gmac_dev );
    gmac_network_control( p_hw, GMAC_NCR_TXEN | GMAC_NCR_RXEN
                          | GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT );
}

void gmac_dev_halt( Gmac * p_gmac );

void gmac_dev_halt( Gmac * p_gmac )
{
    gmac_network_control( p_gmac, GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT );
    gmac_disable_interrupt( p_gmac, ~0u );
}


/**
 * \brief GMAC Interrupt handler.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
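 *
 * Typically called from the GMAC interrupt service routine; a minimal sketch,
 * assuming a CMSIS-style handler name and the 'gs_gmac_dev' instance used in
 * the examples above:
 * \code
 * void GMAC_Handler( void )
 * {
 *     gmac_handler( &gs_gmac_dev );
 * }
 * \endcode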
 */

#if ( GMAC_STATS != 0 )
    extern int logPrintf( const char * pcFormat,
                          ... );

    void gmac_show_irq_counts( void )
    {
        int index;

        for( index = 0; index < ARRAY_SIZE( intPairs ); index++ )
        {
            if( gmacStats.intStatus[ intPairs[ index ].index ] )
            {
                logPrintf( "%s : %6u\n", intPairs[ index ].name, gmacStats.intStatus[ intPairs[ index ].index ] );
            }
        }
    }
#endif /* if ( GMAC_STATS != 0 ) */

void gmac_handler( gmac_device_t * p_gmac_dev )
{
    Gmac * p_hw = p_gmac_dev->p_hw;

    #if ( GMAC_USES_TX_CALLBACK != 0 )
        gmac_tx_descriptor_t * p_tx_td;
        gmac_dev_tx_cb_t * p_tx_cb = NULL;
        uint32_t ul_tx_status_flag;
    #endif
    #if ( GMAC_STATS != 0 )
        int index;
    #endif

    /* volatile */ uint32_t ul_isr;
    /* volatile */ uint32_t ul_rsr;
    /* volatile */ uint32_t ul_tsr;

    ul_isr = gmac_get_interrupt_status( p_hw );
    ul_rsr = gmac_get_rx_status( p_hw );
    ul_tsr = gmac_get_tx_status( p_hw );

    /* Why clear bits that are ignored anyway ? */
    /* ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300); */
    #if ( GMAC_STATS != 0 )
        {
            for( index = 0; index < ARRAY_SIZE( intPairs ); index++ )
            {
                if( ul_isr & intPairs[ index ].mask )
                {
                    gmacStats.intStatus[ intPairs[ index ].index ]++;
                }
            }
        }
    #endif /* GMAC_STATS != 0 */

    /* RX packet */
    if( ( ul_isr & GMAC_ISR_RCOMP ) || ( ul_rsr & ( GMAC_RSR_REC | GMAC_RSR_RXOVR | GMAC_RSR_BNA ) ) )
    {
        /* Clear status */
        gmac_clear_rx_status( p_hw, ul_rsr );

        if( ul_isr & GMAC_ISR_RCOMP )
        {
            ul_rsr |= GMAC_RSR_REC;
        }

        /* Invoke callbacks which can be useful to wake up a task */
        if( p_gmac_dev->func_rx_cb )
        {
            p_gmac_dev->func_rx_cb( ul_rsr );
        }
    }

    /* TX packet */
    if( ( ul_isr & GMAC_ISR_TCOMP ) || ( ul_tsr & ( GMAC_TSR_TXCOMP | GMAC_TSR_COL | GMAC_TSR_RLE | GMAC_TSR_UND ) ) )
    {
        #if ( GMAC_USES_TX_CALLBACK != 0 )
            ul_tx_status_flag = GMAC_TSR_TXCOMP;
        #endif
        /* A frame transmitted */

        /* Check RLE */
        if( ul_tsr & GMAC_TSR_RLE )
        {
            /* Status RLE & Number of discarded buffers */
            #if ( GMAC_USES_TX_CALLBACK != 0 )
                ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT( p_gmac_dev->l_tx_head,
                                                             p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size );
                p_tx_cb = &p_gmac_dev->func_tx_cb_list[ p_gmac_dev->l_tx_tail ];
            #endif
            gmac_reset_tx_mem( p_gmac_dev );
            gmac_enable_transmit( p_hw, 1 );
        }

        /* Clear status */
        gmac_clear_tx_status( p_hw, ul_tsr );

        #if ( GMAC_USES_TX_CALLBACK != 0 )
            if( !CIRC_EMPTY( p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail ) )
            {
                /* Check the buffers */
                do
                {
                    p_tx_td = &p_gmac_dev->p_tx_dscr[ p_gmac_dev->l_tx_tail ];
                    p_tx_cb = &p_gmac_dev->func_tx_cb_list[ p_gmac_dev->l_tx_tail ];

                    /* Any error? Exit if buffer has not been sent yet */
                    if( ( p_tx_td->status.val & GMAC_TXD_USED ) == 0 )
                    {
                        break;
                    }

                    /* Notify upper layer that a packet has been sent */
                    if( *p_tx_cb )
                    {
                        ( *p_tx_cb )( ul_tx_status_flag, ( void * ) p_tx_td->addr );
                        #if ( ipconfigZERO_COPY_TX_DRIVER != 0 )
                            {
                                p_tx_td->addr = 0ul;
                            }
                        #endif /* ipconfigZERO_COPY_TX_DRIVER */
                    }

                    circ_inc32( &p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size );
                } while( CIRC_CNT( p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
                                   p_gmac_dev->ul_tx_list_size ) );
            }

            if( ul_tsr & GMAC_TSR_RLE )
            {
                /* Notify upper layer RLE */
                if( *p_tx_cb )
                {
                    ( *p_tx_cb )( ul_tx_status_flag, NULL );
                }
            }
        #endif /* GMAC_USES_TX_CALLBACK */

        #if ( GMAC_USES_WAKEUP_CALLBACK )

            /* If a wakeup has been scheduled, notify upper layer that it can
             * send other packets, and the sending will be successful. */
            if( ( CIRC_SPACE( p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
                              p_gmac_dev->ul_tx_list_size ) >= p_gmac_dev->uc_wakeup_threshold ) &&
                p_gmac_dev->func_wakeup_cb )
            {
                p_gmac_dev->func_wakeup_cb();
            }
        #endif
    }
}

/*@} */

/*/ @cond 0 */
/**INDENT-OFF**/
#ifdef __cplusplus
}
#endif
/**INDENT-ON**/
/*/ @endcond */