@@ -96,6 +96,7 @@ static struct net_buf *rx_frag_list_que1[PRIORITY_QUEUE1_RX_DESC_COUNT];
 #if GMAC_PRIORITY_QUEUE_NO == 2
 static struct net_buf *rx_frag_list_que2[PRIORITY_QUEUE2_RX_DESC_COUNT];
 #endif
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 /* TX buffer accounting list */
 static struct net_buf *tx_frag_list_que0[MAIN_QUEUE_TX_DESC_COUNT];
 #if GMAC_PRIORITY_QUEUE_NO >= 1
@@ -114,6 +115,7 @@ static struct net_pkt *tx_frame_list_que1[CONFIG_NET_PKT_TX_COUNT + 1];
 static struct net_pkt *tx_frame_list_que2[CONFIG_NET_PKT_TX_COUNT + 1];
 #endif
 #endif
+#endif
 
 #define MODULO_INC(val, max) {val = (++val < max) ? val : 0; }
 
@@ -149,6 +151,7 @@ static inline void dcache_clean(u32_t addr, u32_t size)
         SCB_CleanDCache_by_Addr((uint32_t *)start_addr, size_full);
 }
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 /*
  * Reset ring buffer
  */
@@ -185,6 +188,7 @@ static void ring_buf_put(struct ring_buf *rb, u32_t val)
         __ASSERT(rb->tail != rb->head,
                  "ring buffer overflow");
 }
+#endif
 
 /*
  * Free pre-reserved RX buffers
@@ -276,11 +280,13 @@ static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
         /* Set the wrap bit on the last descriptor */
         tx_desc_list->buf[tx_desc_list->len - 1].w1 |= GMAC_TXW1_WRAP;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
         /* Reset TX frame list */
         ring_buf_reset(&queue->tx_frag_list);
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
         ring_buf_reset(&queue->tx_frames);
 #endif
+#endif
 }
 
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
@@ -478,6 +484,9 @@ static inline struct net_if *get_iface(struct eth_sam_dev_data *ctx,
  */
 static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
 {
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+        k_sem_give(&queue->tx_sem);
+#else
         struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
         struct gmac_desc *tx_desc;
         struct net_buf *frag;
@@ -529,25 +538,29 @@ static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
                         break;
                 }
         }
+#endif
 }
 
 /*
  * Reset TX queue when errors are detected
  */
 static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
 {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
         struct net_buf *frag;
         struct ring_buf *tx_frag_list = &queue->tx_frag_list;
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
         struct net_pkt *pkt;
         struct ring_buf *tx_frames = &queue->tx_frames;
+#endif
 #endif
 
         queue->err_tx_flushed_count++;
 
         /* Stop transmission, clean transmit pipeline and control registers */
         gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
         /* Free all frag resources in the TX path */
         while (tx_frag_list->tail != tx_frag_list->head) {
                 /* Release net buffer to the buffer pool */
@@ -570,10 +583,16 @@ static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
 
         /* Reinitialize TX descriptor list */
         k_sem_reset(&queue->tx_desc_sem);
-        tx_descriptors_init(gmac, queue);
         for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
                 k_sem_give(&queue->tx_desc_sem);
         }
+#endif
+        tx_descriptors_init(gmac, queue);
+
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+        /* Reinitialize TX mutex */
+        k_sem_give(&queue->tx_sem);
+#endif
 
         /* Restart transmission */
         gmac->GMAC_NCR |= GMAC_NCR_TXEN;
@@ -923,12 +942,19 @@ static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue)
 
         tx_descriptors_init(gmac, queue);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+        /* Initialize TX semaphore. This semaphore is used to wait until the TX
+         * data has been sent.
+         */
+        k_sem_init(&queue->tx_sem, 0, 1);
+#else
         /* Initialize TX descriptors semaphore. The semaphore is required as the
          * size of the TX descriptor list is limited while the number of TX data
          * buffers is not.
          */
         k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
                    queue->tx_desc_list.len - 1);
+#endif
 
         /* Set Receive Buffer Queue Pointer Register */
         gmac->GMAC_RBQB = (u32_t)queue->rx_desc_list.buf;
@@ -983,8 +1009,12 @@ static int priority_queue_init(Gmac *gmac, struct gmac_queue *queue)
 
         tx_descriptors_init(gmac, queue);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+        k_sem_init(&queue->tx_sem, 0, 1);
+#else
         k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
                    queue->tx_desc_list.len - 1);
+#endif
 
         /* Setup RX buffer size for DMA */
         gmac->GMAC_RBSRPQ[queue_index] =
@@ -1263,8 +1293,19 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
         u8_t *frag_data;
         u16_t frag_len;
         u32_t err_tx_flushed_count_at_entry;
+#if GMAC_MULTIPLE_TX_PACKETS == 1
         unsigned int key;
+#endif
         u8_t pkt_prio;
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
+        u16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
+        struct gptp_hdr *hdr;
+#if defined(CONFIG_NET_VLAN)
+        struct net_eth_hdr *eth_hdr;
+#endif
+#endif
+#endif
 
         __ASSERT(pkt, "buf pointer is NULL");
         __ASSERT(pkt->frags, "Frame data missing");
@@ -1297,6 +1338,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
                 /* Assure cache coherency before DMA read operation */
                 dcache_clean((u32_t)frag_data, frag->size);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 k_sem_take(&queue->tx_desc_sem, K_FOREVER);
 
                 /* The following section becomes critical and requires IRQ lock
@@ -1311,6 +1353,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
                         irq_unlock(key);
                         return -EIO;
                 }
+#endif
 
                 tx_desc = &tx_desc_list->buf[tx_desc_list->head];
 
@@ -1329,6 +1372,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
                 /* Update descriptor position */
                 MODULO_INC(tx_desc_list->head, tx_desc_list->len);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 __ASSERT(tx_desc_list->head != tx_desc_list->tail,
                          "tx_desc_list overflow");
 
@@ -1339,18 +1383,21 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
                 net_pkt_frag_ref(frag);
 
                 irq_unlock(key);
+#endif
 
                 /* Continue with the rest of fragments (only data) */
                 frag = frag->frags;
         }
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
         key = irq_lock();
 
         /* Check if tx_error_handler() function was executed */
         if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
                 irq_unlock(key);
                 return -EIO;
         }
+#endif
 
         /* Ensure the descriptor following the last one is marked as used */
         tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED;
@@ -1365,6 +1412,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
          */
         tx_first_desc->w1 &= ~GMAC_TXW1_USED;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
         /* Account for a sent frame */
         ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));
@@ -1374,6 +1422,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 #endif
 
         irq_unlock(key);
+#endif
 
         /* Guarantee that the first fragment got its bit removed before starting
          * sending packets to avoid packets getting stuck.
@@ -1383,6 +1432,29 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
         /* Start transmission */
         gmac->GMAC_NCR |= GMAC_NCR_TSTART;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+        /* Wait until the packet is sent */
+        k_sem_take(&queue->tx_sem, K_FOREVER);
+
+        /* Check if transmit successful or not */
+        if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
+                return -EIO;
+        }
+#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
+#if defined(CONFIG_NET_VLAN)
+        eth_hdr = NET_ETH_HDR(pkt);
+        if (ntohs(eth_hdr->type) == NET_ETH_PTYPE_VLAN) {
+                vlan_tag = net_pkt_vlan_tag(pkt);
+        }
+#endif
+        hdr = check_gptp_msg(get_iface(dev_data, vlan_tag), pkt, true);
+        timestamp_tx_pkt(gmac, hdr, pkt);
+        if (hdr && need_timestamping(hdr)) {
+                net_if_add_tx_timestamp(pkt);
+        }
+#endif
+#endif
+
         return 0;
 }
 
@@ -1421,10 +1493,12 @@ static void queue0_isr(void *arg)
         if (isr & GMAC_INT_TX_ERR_BITS) {
                 tx_error_handler(gmac, queue);
         } else if (isr & GMAC_ISR_TCOMP) {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
                 LOG_DBG("tx.w1=0x%08x, tail=%d",
                         tail_desc->w1,
                         tx_desc_list->tail);
+#endif
 
                 tx_completed(gmac, queue);
         }
@@ -1469,10 +1543,12 @@ static inline void priority_queue_isr(void *arg, unsigned int queue_idx)
         if (isrpq & GMAC_INTPQ_TX_ERR_BITS) {
                 tx_error_handler(gmac, queue);
         } else if (isrpq & GMAC_ISRPQ_TCOMP) {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
                 LOG_DBG("tx.w1=0x%08x, tail=%d",
                         tail_desc->w1,
                         tx_desc_list->tail);
+#endif
 
                 tx_completed(gmac, queue);
         }
@@ -1894,6 +1970,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .buf = (u32_t *)rx_frag_list_que0,
                         .len = ARRAY_SIZE(rx_frag_list_que0),
                 },
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 .tx_frag_list = {
                         .buf = (u32_t *)tx_frag_list_que0,
                         .len = ARRAY_SIZE(tx_frag_list_que0),
@@ -1903,6 +1980,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .buf = (u32_t *)tx_frame_list_que0,
                         .len = ARRAY_SIZE(tx_frame_list_que0),
                 },
+#endif
 #endif
         }, {
                 .que_idx = GMAC_QUE_1,
@@ -1919,6 +1997,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .buf = (u32_t *)rx_frag_list_que1,
                         .len = ARRAY_SIZE(rx_frag_list_que1),
                 },
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 .tx_frag_list = {
                         .buf = (u32_t *)tx_frag_list_que1,
                         .len = ARRAY_SIZE(tx_frag_list_que1),
@@ -1929,6 +2008,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .len = ARRAY_SIZE(tx_frame_list_que1),
                 }
 #endif
+#endif
 #endif
         }, {
                 .que_idx = GMAC_QUE_2,
@@ -1945,6 +2025,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .buf = (u32_t *)rx_frag_list_que2,
                         .len = ARRAY_SIZE(rx_frag_list_que2),
                 },
+#if GMAC_MULTIPLE_TX_PACKETS == 1
                 .tx_frag_list = {
                         .buf = (u32_t *)tx_frag_list_que2,
                         .len = ARRAY_SIZE(tx_frag_list_que2),
@@ -1955,6 +2036,7 @@ static struct eth_sam_dev_data eth0_data = {
                         .len = ARRAY_SIZE(tx_frame_list_que2),
                 }
 #endif
+#endif
 #endif
         }
 },
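
For reference, the GMAC_MULTIPLE_TX_PACKETS == 0 path added by this diff reduces TX handling to a single-frame-in-flight semaphore handshake: nonpriority_queue_init()/priority_queue_init() create tx_sem with a count of 0, eth_tx() starts the DMA and blocks on tx_sem, tx_completed() gives the semaphore from the TCOMP interrupt, and tx_error_handler() bumps err_tx_flushed_count and gives the semaphore so the blocked sender wakes up and returns -EIO. The sketch below shows only that handshake in isolation; it is an illustration, not the driver code — struct sam_tx_queue, the sam_tx_* function names, and the start_dma() placeholder are hypothetical stand-ins, while the k_sem_* calls and the flushed-count check mirror the diff above.

/* Minimal sketch of the single-TX-packet handshake (illustrative only). */
#include <kernel.h>
#include <errno.h>

struct sam_tx_queue {
        struct k_sem tx_sem;            /* given by the ISR paths below */
        u32_t err_tx_flushed_count;     /* bumped when the TX queue is flushed */
};

static void sam_tx_queue_init(struct sam_tx_queue *q)
{
        /* Count 0, limit 1: the sender blocks until an ISR gives the semaphore */
        k_sem_init(&q->tx_sem, 0, 1);
        q->err_tx_flushed_count = 0;
}

/* TCOMP interrupt path (tx_completed() in the diff) */
static void sam_tx_done_isr(struct sam_tx_queue *q)
{
        k_sem_give(&q->tx_sem);
}

/* TX error path (tx_error_handler() in the diff) */
static void sam_tx_error_isr(struct sam_tx_queue *q)
{
        q->err_tx_flushed_count++;
        /* Wake the blocked sender so it can notice the flush and return -EIO */
        k_sem_give(&q->tx_sem);
}

/* Simplified eth_tx(): one frame in flight at a time */
static int sam_tx_one_frame(struct sam_tx_queue *q)
{
        u32_t flushed_at_entry = q->err_tx_flushed_count;

        /* start_dma(q); -- program the descriptors and set GMAC_NCR_TSTART here */

        /* Wait until the packet is sent (or the queue is flushed on error) */
        k_sem_take(&q->tx_sem, K_FOREVER);

        if (q->err_tx_flushed_count != flushed_at_entry) {
                return -EIO;    /* queue was reset while we were waiting */
        }

        return 0;
}

Because only one frame is ever pending, this mode needs no TX descriptor semaphore, no per-fragment ring_buf accounting, and no irq_lock() critical sections, which is why the diff compiles all of those out when GMAC_MULTIPLE_TX_PACKETS is 0.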