Commit cc1d147

aurel32 authored and nashif committed
driver: eth: gmac: wait until the packet is sent before returning
Scale down the TX path of the GMAC driver by waiting for a packet to be
fully sent before returning from the send function. This has a small
performance impact, but has a few advantages:

- It allows the Ethernet code to modify the packet afterward, fixing PTP
  support on this board (see PR #12563).
- It returns an error to the IP stack in case of a transmit failure.
- It doesn't require net_buf to be thread safe.

This change can be reverted by changing GMAC_MULTIPLE_TX_PACKETS from 0
to 1.

Signed-off-by: Aurelien Jarno <[email protected]>
1 parent 1d55411 commit cc1d147
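
The mechanism behind the change is the usual Zephyr handshake between a sender and an ISR: eth_tx() now blocks on a per-queue semaphore that the TX-complete (or TX-error) interrupt gives back. A condensed sketch of the pattern, using a hypothetical tx_queue type in place of the driver's full struct gmac_queue (the real code is in the diff below):

#include <kernel.h>

/* Hypothetical, stripped-down stand-in for the driver's per-queue state. */
struct tx_queue {
	struct k_sem tx_sem;
};

static void tx_queue_init(struct tx_queue *queue)
{
	/* Initial count 0: the first sender blocks until the ISR signals
	 * completion. Limit 1: at most one packet is in flight.
	 */
	k_sem_init(&queue->tx_sem, 0, 1);
}

/* Called from the TX-complete interrupt (and from the error handler,
 * so a blocked sender is always released).
 */
static void tx_done_isr(struct tx_queue *queue)
{
	k_sem_give(&queue->tx_sem);
}

static int tx_send(struct tx_queue *queue)
{
	/* ... fill the DMA descriptors and start transmission here ... */

	/* Block until the packet has been fully sent; this is what lets
	 * the caller safely reuse the packet and report transmit errors.
	 */
	k_sem_take(&queue->tx_sem, K_FOREVER);
	return 0;
}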

2 files changed: 97 additions, 1 deletion

drivers/ethernet/eth_sam_gmac.c

Lines changed: 83 additions & 1 deletion
@@ -96,6 +96,7 @@ static struct net_buf *rx_frag_list_que1[PRIORITY_QUEUE1_RX_DESC_COUNT];
 #if GMAC_PRIORITY_QUEUE_NO == 2
 static struct net_buf *rx_frag_list_que2[PRIORITY_QUEUE2_RX_DESC_COUNT];
 #endif
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 /* TX buffer accounting list */
 static struct net_buf *tx_frag_list_que0[MAIN_QUEUE_TX_DESC_COUNT];
 #if GMAC_PRIORITY_QUEUE_NO >= 1
@@ -114,6 +115,7 @@ static struct net_pkt *tx_frame_list_que1[CONFIG_NET_PKT_TX_COUNT + 1];
 static struct net_pkt *tx_frame_list_que2[CONFIG_NET_PKT_TX_COUNT + 1];
 #endif
 #endif
+#endif
 
 #define MODULO_INC(val, max) {val = (++val < max) ? val : 0; }
 
@@ -149,6 +151,7 @@ static inline void dcache_clean(u32_t addr, u32_t size)
 	SCB_CleanDCache_by_Addr((uint32_t *)start_addr, size_full);
 }
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 /*
  * Reset ring buffer
  */
@@ -185,6 +188,7 @@ static void ring_buf_put(struct ring_buf *rb, u32_t val)
 	__ASSERT(rb->tail != rb->head,
 		 "ring buffer overflow");
 }
+#endif
 
 /*
  * Free pre-reserved RX buffers
@@ -276,11 +280,13 @@ static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
 	/* Set the wrap bit on the last descriptor */
 	tx_desc_list->buf[tx_desc_list->len - 1].w1 |= GMAC_TXW1_WRAP;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	/* Reset TX frame list */
 	ring_buf_reset(&queue->tx_frag_list);
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
 	ring_buf_reset(&queue->tx_frames);
 #endif
+#endif
 }
 
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
@@ -478,6 +484,9 @@ static inline struct net_if *get_iface(struct eth_sam_dev_data *ctx,
  */
 static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
 {
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+	k_sem_give(&queue->tx_sem);
+#else
 	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
 	struct gmac_desc *tx_desc;
 	struct net_buf *frag;
@@ -529,25 +538,29 @@ static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
 			break;
 		}
 	}
+#endif
 }
 
 /*
  * Reset TX queue when errors are detected
  */
 static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
 {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	struct net_buf *frag;
 	struct ring_buf *tx_frag_list = &queue->tx_frag_list;
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
 	struct net_pkt *pkt;
 	struct ring_buf *tx_frames = &queue->tx_frames;
+#endif
 #endif
 
 	queue->err_tx_flushed_count++;
 
 	/* Stop transmission, clean transmit pipeline and control registers */
 	gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	/* Free all frag resources in the TX path */
 	while (tx_frag_list->tail != tx_frag_list->head) {
 		/* Release net buffer to the buffer pool */
@@ -570,10 +583,16 @@ static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
 
 	/* Reinitialize TX descriptor list */
 	k_sem_reset(&queue->tx_desc_sem);
-	tx_descriptors_init(gmac, queue);
 	for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
 		k_sem_give(&queue->tx_desc_sem);
 	}
+#endif
+	tx_descriptors_init(gmac, queue);
+
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+	/* Reinitialize TX mutex */
+	k_sem_give(&queue->tx_sem);
+#endif
 
 	/* Restart transmission */
 	gmac->GMAC_NCR |= GMAC_NCR_TXEN;
@@ -923,12 +942,19 @@ static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue)
 
 	tx_descriptors_init(gmac, queue);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+	/* Initialize TX semaphore. This semaphore is used to wait until the TX
+	 * data has been sent.
+	 */
+	k_sem_init(&queue->tx_sem, 0, 1);
+#else
 	/* Initialize TX descriptors semaphore. The semaphore is required as the
 	 * size of the TX descriptor list is limited while the number of TX data
 	 * buffers is not.
	 */
 	k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
 		   queue->tx_desc_list.len - 1);
+#endif
 
 	/* Set Receive Buffer Queue Pointer Register */
 	gmac->GMAC_RBQB = (u32_t)queue->rx_desc_list.buf;
@@ -983,8 +1009,12 @@ static int priority_queue_init(Gmac *gmac, struct gmac_queue *queue)
 
 	tx_descriptors_init(gmac, queue);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+	k_sem_init(&queue->tx_sem, 0, 1);
+#else
 	k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
 		   queue->tx_desc_list.len - 1);
+#endif
 
 	/* Setup RX buffer size for DMA */
 	gmac->GMAC_RBSRPQ[queue_index] =
@@ -1263,8 +1293,19 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 	u8_t *frag_data;
 	u16_t frag_len;
 	u32_t err_tx_flushed_count_at_entry;
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	unsigned int key;
+#endif
 	u8_t pkt_prio;
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
+	u16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
+	struct gptp_hdr *hdr;
+#if defined(CONFIG_NET_VLAN)
+	struct net_eth_hdr *eth_hdr;
+#endif
+#endif
+#endif
 
 	__ASSERT(pkt, "buf pointer is NULL");
 	__ASSERT(pkt->frags, "Frame data missing");
@@ -1297,6 +1338,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 		/* Assure cache coherency before DMA read operation */
 		dcache_clean((u32_t)frag_data, frag->size);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		k_sem_take(&queue->tx_desc_sem, K_FOREVER);
 
 		/* The following section becomes critical and requires IRQ lock
@@ -1311,6 +1353,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 			irq_unlock(key);
 			return -EIO;
 		}
+#endif
 
 		tx_desc = &tx_desc_list->buf[tx_desc_list->head];
 
@@ -1329,6 +1372,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 		/* Update descriptor position */
 		MODULO_INC(tx_desc_list->head, tx_desc_list->len);
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		__ASSERT(tx_desc_list->head != tx_desc_list->tail,
 			 "tx_desc_list overflow");
 
@@ -1339,18 +1383,21 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 		net_pkt_frag_ref(frag);
 
 		irq_unlock(key);
+#endif
 
 		/* Continue with the rest of fragments (only data) */
 		frag = frag->frags;
 	}
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	key = irq_lock();
 
 	/* Check if tx_error_handler() function was executed */
 	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
 		irq_unlock(key);
 		return -EIO;
 	}
+#endif
 
 	/* Ensure the descriptor following the last one is marked as used */
 	tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED;
@@ -1365,6 +1412,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 	 */
 	tx_first_desc->w1 &= ~GMAC_TXW1_USED;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
 	/* Account for a sent frame */
 	ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));
@@ -1374,6 +1422,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 #endif
 
 	irq_unlock(key);
+#endif
 
 	/* Guarantee that the first fragment got its bit removed before starting
 	 * sending packets to avoid packets getting stuck.
@@ -1383,6 +1432,29 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
 	/* Start transmission */
 	gmac->GMAC_NCR |= GMAC_NCR_TSTART;
 
+#if GMAC_MULTIPLE_TX_PACKETS == 0
+	/* Wait until the packet is sent */
+	k_sem_take(&queue->tx_sem, K_FOREVER);
+
+	/* Check if transmit successful or not */
+	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
+		return -EIO;
+	}
+#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
+#if defined(CONFIG_NET_VLAN)
+	eth_hdr = NET_ETH_HDR(pkt);
+	if (ntohs(eth_hdr->type) == NET_ETH_PTYPE_VLAN) {
+		vlan_tag = net_pkt_vlan_tag(pkt);
+	}
+#endif
+	hdr = check_gptp_msg(get_iface(dev_data, vlan_tag), pkt, true);
+	timestamp_tx_pkt(gmac, hdr, pkt);
+	if (hdr && need_timestamping(hdr)) {
+		net_if_add_tx_timestamp(pkt);
+	}
+#endif
+#endif
+
 	return 0;
 }
 
@@ -1421,10 +1493,12 @@ static void queue0_isr(void *arg)
 	if (isr & GMAC_INT_TX_ERR_BITS) {
 		tx_error_handler(gmac, queue);
 	} else if (isr & GMAC_ISR_TCOMP) {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
 		LOG_DBG("tx.w1=0x%08x, tail=%d",
 			tail_desc->w1,
 			tx_desc_list->tail);
+#endif
 
 		tx_completed(gmac, queue);
 	}
@@ -1469,10 +1543,12 @@ static inline void priority_queue_isr(void *arg, unsigned int queue_idx)
 	if (isrpq & GMAC_INTPQ_TX_ERR_BITS) {
 		tx_error_handler(gmac, queue);
 	} else if (isrpq & GMAC_ISRPQ_TCOMP) {
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
 		LOG_DBG("tx.w1=0x%08x, tail=%d",
 			tail_desc->w1,
 			tx_desc_list->tail);
+#endif
 
 		tx_completed(gmac, queue);
 	}
@@ -1894,6 +1970,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.buf = (u32_t *)rx_frag_list_que0,
 			.len = ARRAY_SIZE(rx_frag_list_que0),
 		},
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		.tx_frag_list = {
 			.buf = (u32_t *)tx_frag_list_que0,
 			.len = ARRAY_SIZE(tx_frag_list_que0),
@@ -1903,6 +1980,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.buf = (u32_t *)tx_frame_list_que0,
 			.len = ARRAY_SIZE(tx_frame_list_que0),
 		},
+#endif
 #endif
 	}, {
 		.que_idx = GMAC_QUE_1,
@@ -1919,6 +1997,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.buf = (u32_t *)rx_frag_list_que1,
 			.len = ARRAY_SIZE(rx_frag_list_que1),
 		},
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		.tx_frag_list = {
 			.buf = (u32_t *)tx_frag_list_que1,
 			.len = ARRAY_SIZE(tx_frag_list_que1),
@@ -1929,6 +2008,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.len = ARRAY_SIZE(tx_frame_list_que1),
 		}
 #endif
+#endif
 #endif
 	}, {
 		.que_idx = GMAC_QUE_2,
@@ -1945,6 +2025,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.buf = (u32_t *)rx_frag_list_que2,
 			.len = ARRAY_SIZE(rx_frag_list_que2),
 		},
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 		.tx_frag_list = {
 			.buf = (u32_t *)tx_frag_list_que2,
 			.len = ARRAY_SIZE(tx_frag_list_que2),
@@ -1955,6 +2036,7 @@ static struct eth_sam_dev_data eth0_data = {
 			.len = ARRAY_SIZE(tx_frame_list_que2),
 		}
 #endif
+#endif
 #endif
 	}
 },
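
For contrast, the multi-packet path kept behind GMAC_MULTIPLE_TX_PACKETS == 1 accounts for in-flight fragments with the driver's simple head/tail index ring (ring_buf_put() and MODULO_INC in the diff above). A self-contained sketch of that accounting scheme, with hypothetical names and a plain assert() standing in for __ASSERT():

#include <assert.h>
#include <stdint.h>

/* Hypothetical mirror of the driver's ring: 'head' is the producer index,
 * 'tail' the consumer index; the ring stores up to len - 1 entries.
 */
struct idx_ring {
	uint32_t *buf;
	uint16_t len;
	uint16_t head;
	uint16_t tail;
};

/* Wrap-around increment, equivalent to the driver's MODULO_INC macro. */
static uint16_t ring_inc(uint16_t val, uint16_t max)
{
	return (val + 1 < max) ? val + 1 : 0;
}

/* Producer side, e.g. eth_tx() accounting for a queued fragment. */
static void ring_put(struct idx_ring *rb, uint32_t val)
{
	rb->buf[rb->head] = val;
	rb->head = ring_inc(rb->head, rb->len);
	assert(rb->head != rb->tail); /* overflow, as in ring_buf_put() */
}

/* Consumer side, e.g. tx_completed() releasing a sent fragment. */
static uint32_t ring_get(struct idx_ring *rb)
{
	assert(rb->tail != rb->head); /* an empty ring would underflow */
	uint32_t val = rb->buf[rb->tail];

	rb->tail = ring_inc(rb->tail, rb->len);
	return val;
}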

drivers/ethernet/eth_sam_gmac_priv.h

Lines changed: 14 additions & 0 deletions
@@ -12,6 +12,13 @@
 
 #include <zephyr/types.h>
 
+/* This option enables support for pushing multiple packets to the DMA
+ * engine. This currently doesn't work, because the current version of
+ * net_pkt/net_buf does not allow access from multiple threads. The
+ * option is therefore currently disabled.
+ */
+#define GMAC_MULTIPLE_TX_PACKETS 0
+
 #define GMAC_MTU 1500
 #define GMAC_FRAME_SIZE_MAX (GMAC_MTU + 18)
 
@@ -164,12 +171,19 @@ struct gmac_desc_list {
 struct gmac_queue {
 	struct gmac_desc_list rx_desc_list;
 	struct gmac_desc_list tx_desc_list;
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	struct k_sem tx_desc_sem;
+#else
+	struct k_sem tx_sem;
+#endif
 
 	struct ring_buf rx_frag_list;
+
+#if GMAC_MULTIPLE_TX_PACKETS == 1
 	struct ring_buf tx_frag_list;
 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
 	struct ring_buf tx_frames;
+#endif
 #endif
 
 	/** Number of RX frames dropped by the driver */