@@ -63,7 +63,7 @@ typedef struct
   uint8_t* buffer;
   uint16_t total_len;
   volatile uint16_t actual_len;
-  uint16_t mps; // max packet size
+  uint16_t mps; // max packet size
 
   // nRF will auto accept OUT packet after DMA is done
   // indicate packet is already ACK
@@ -82,11 +82,8 @@ static struct
   // +1 for ISO endpoints
   xfer_td_t xfer[EP_CBI_COUNT + 1][2];
 
-  // Number of pending DMA that is started but not handled yet by dcd_int_handler().
-  // Since nRF can only carry one DMA can run at a time, this value is normally be either 0 or 1.
-  // However, in critical section with interrupt disabled, the DMA can be finished and added up
-  // until handled by dcd_int_handler() when exiting critical section.
-  volatile uint8_t dma_pending;
+  // nRF can only run one DMA at a time; this flag guards access to EasyDMA
+  volatile bool dma_running;
 }_dcd;
 
 /*------------------------------------------------------------------*/
@@ -115,67 +112,68 @@ TU_ATTR_ALWAYS_INLINE static inline bool is_in_isr(void)
 }
 
 // helper to start DMA
+static void start_dma(volatile uint32_t* reg_startep)
+{
+  _dcd.dma_running = true;
+
+  (*reg_startep) = 1;
+  __ISB(); __DSB();
+
+  // TASKS_EP0STATUS and TASKS_EP0RCVOUT seem to need EasyDMA to be available.
+  // However, these don't trigger any DMA transfer and get an ENDED event right away.
+  // Therefore dma_running is corrected immediately.
+  if ( (reg_startep == &NRF_USBD->TASKS_EP0STATUS) || (reg_startep == &NRF_USBD->TASKS_EP0RCVOUT) )
+  {
+    _dcd.dma_running = false;
+  }
+}
+
+// only 1 EasyDMA can be active at any time
 // TODO use Cortex M4 LDREX and STREX command (atomic) to have better mutex access to EasyDMA
 // since current implementation does not 100% guarded against race condition
 static void edpt_dma_start(volatile uint32_t* reg_startep)
 {
-  // Only one dma can be active
-  if ( _dcd.dma_pending )
+  // Called in critical section, i.e. within the USB ISR or with the USB/global interrupt disabled
+  if ( is_in_isr() || __get_PRIMASK() || !NVIC_GetEnableIRQ(USBD_IRQn) )
   {
-    if (is_in_isr() )
+    if ( _dcd.dma_running )
     {
-      // Called within ISR, use usbd task to defer later
+      // use usbd task to defer until later
       usbd_defer_func((osal_task_func_t) edpt_dma_start, (void*) (uintptr_t) reg_startep, true);
-      return;
+    }else
+    {
+      start_dma(reg_startep);
     }
-    else
+  }else
+  {
+    // Called in non-critical thread mode, which should be 99% of the time.
+    // It should be safe to block and wait until the previous DMA transfer completes.
+    uint8_t const rhport = 0;
+    bool started = false;
+    while ( !started )
     {
-      if ( __get_PRIMASK() || !NVIC_GetEnableIRQ(USBD_IRQn) )
-      {
-        // Called in critical section with interrupt disabled. We have to manually check
-        // for the DMA complete by comparing current pending DMA with number of ENDED Events
-        uint32_t ended = 0;
+      // LDREX/STREX may be needed in the form of std atomic (requires C11), or an
+      // osal mutex, to guard against multi-core MCUs such as the nRF53
+      dcd_int_disable(rhport);
 
-        while ( _dcd.dma_pending > ((uint8_t) ended) )
-        {
-          ended = NRF_USBD->EVENTS_ENDISOIN + NRF_USBD->EVENTS_ENDISOOUT;
-
-          for (uint8_t i = 0; i < EP_CBI_COUNT; i++)
-          {
-            ended += NRF_USBD->EVENTS_ENDEPIN[i] + NRF_USBD->EVENTS_ENDEPOUT[i];
-          }
-        }
-      }else
+      if ( !_dcd.dma_running )
       {
-        // Called in non-critical thread-mode, should be 99% of the time.
-        // Should be safe to blocking wait until previous DMA transfer complete
-        while ( _dcd.dma_pending ) { }
+        start_dma(reg_startep);
+        started = true;
       }
-    }
-  }
 
-  _dcd.dma_pending++;
+      dcd_int_enable(rhport);
 
-  (*reg_startep) = 1;
-  __ISB(); __DSB();
+      // osal_yield();
+    }
+  }
 }
 
 // DMA is complete
 static void edpt_dma_end(void)
 {
-  TU_ASSERT(_dcd.dma_pending, );
-  _dcd.dma_pending = 0;
-}
-
-// helper to set TASKS_EP0STATUS / TASKS_EP0RCVOUT since they also need EasyDMA
-// However TASKS_EP0STATUS doesn't trigger any DMA transfer and got ENDED event subsequently
-// Therefore dma_running state will be corrected right away
-void start_ep0_task(volatile uint32_t* reg_task)
-{
-  edpt_dma_start(reg_task);
-
-  // correct the dma_running++ in dma start
-  if (_dcd.dma_pending) _dcd.dma_pending--;
+  TU_ASSERT(_dcd.dma_running, );
+  _dcd.dma_running = false;
 }
 
 // helper getting td
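As the TODO in the hunk above notes, the guard could eventually be expressed with C11 atomics instead of toggling the USBD interrupt around the dma_running check; on Cortex-M4, GCC/Clang lower these to LDREX/STREX. The following is a minimal sketch of that idea with hypothetical helper names, not part of this patch:

#include <stdatomic.h>
#include <stdbool.h>

// Hypothetical guard for the single EasyDMA channel (illustration, not driver code).
static atomic_flag _easydma_busy = ATOMIC_FLAG_INIT;

// Try to claim EasyDMA; returns true if the caller now owns it and may trigger
// a TASKS_STARTEPIN/STARTEPOUT task.
static inline bool easydma_try_claim(void)
{
  // test_and_set returns the previous value, so false means we just acquired the flag.
  return !atomic_flag_test_and_set_explicit(&_easydma_busy, memory_order_acquire);
}

// Release ownership, e.g. from the ENDEPIN/ENDEPOUT (DMA complete) handler.
static inline void easydma_release(void)
{
  atomic_flag_clear_explicit(&_easydma_busy, memory_order_release);
}

With such a flag, edpt_dma_start() could spin (or defer via usbd_defer_func()) on easydma_try_claim() rather than disabling and re-enabling the USBD interrupt.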
@@ -194,7 +192,10 @@ static void xact_out_dma(uint8_t epnum)
   {
     xact_len = NRF_USBD->SIZE.ISOOUT;
     // If ZERO bit is set, ignore ISOOUT length
-    if (xact_len & USBD_SIZE_ISOOUT_ZERO_Msk) xact_len = 0;
+    if (xact_len & USBD_SIZE_ISOOUT_ZERO_Msk)
+    {
+      xact_len = 0;
+    }
     else
     {
       // Trigger DMA move data from Endpoint -> SRAM
@@ -215,9 +216,6 @@ static void xact_out_dma(uint8_t epnum)
 
     edpt_dma_start(&NRF_USBD->TASKS_STARTEPOUT[epnum]);
   }
-
-  xfer->buffer += xact_len;
-  xfer->actual_len += xact_len;
 }
 
 // Prepare for a CBI transaction IN, call at the start
@@ -232,8 +230,6 @@ static void xact_in_dma(uint8_t epnum)
   NRF_USBD->EPIN[epnum].PTR    = (uint32_t) xfer->buffer;
   NRF_USBD->EPIN[epnum].MAXCNT = xact_len;
 
-  xfer->buffer += xact_len;
-
   edpt_dma_start(&NRF_USBD->TASKS_STARTEPIN[epnum]);
 }
 
@@ -242,6 +238,7 @@ static void xact_in_dma(uint8_t epnum)
 //--------------------------------------------------------------------+
 void dcd_init (uint8_t rhport)
 {
+  TU_LOG1("dcd init\r\n");
   (void) rhport;
 }
 
@@ -466,7 +463,7 @@ bool dcd_edpt_xfer (uint8_t rhport, uint8_t ep_addr, uint8_t * buffer, uint16_t
   if ( control_status )
   {
     // Status Phase also requires EasyDMA has to be available as well !!!!
-    start_ep0_task(&NRF_USBD->TASKS_EP0STATUS);
+    edpt_dma_start(&NRF_USBD->TASKS_EP0STATUS);
 
     // The nRF doesn't interrupt on status transmit so we queue up a success response.
     dcd_event_xfer_complete(0, ep_addr, 0, XFER_RESULT_SUCCESS, is_in_isr());
@@ -476,7 +473,7 @@ bool dcd_edpt_xfer (uint8_t rhport, uint8_t ep_addr, uint8_t * buffer, uint16_t
     if ( epnum == 0 )
     {
       // Accept next Control Out packet. TASKS_EP0RCVOUT also require EasyDMA
-      start_ep0_task(&NRF_USBD->TASKS_EP0RCVOUT);
+      edpt_dma_start(&NRF_USBD->TASKS_EP0RCVOUT);
     }else
     {
       if ( xfer->data_received )
@@ -522,7 +519,6 @@ void dcd_edpt_stall (uint8_t rhport, uint8_t ep_addr)
     // There maybe data in endpoint fifo already, we need to pull it out
     if ( (dir == TUSB_DIR_OUT) && xfer->data_received )
     {
-      TU_LOG_LOCATION();
       xfer->data_received = false;
      xact_out_dma(epnum);
     }
@@ -717,7 +713,8 @@ void dcd_int_handler(uint8_t rhport)
 
   if ( int_status & EDPT_END_ALL_MASK )
   {
-    // DMA complete move data from SRAM -> Endpoint
+    // DMA complete: data moved SRAM <-> Endpoint
+    // Must be handled before the endpoint transfer handling below
     edpt_dma_end();
   }
 
@@ -732,7 +729,7 @@ void dcd_int_handler(uint8_t rhport)
  * - Host -> Endpoint
  *   EPDATA (or EP0DATADONE) interrupted, check EPDATASTATUS.EPOUT[i]
  *   to start DMA. For Bulk/Interrupt, this step can occur automatically (without sw),
- *   which means data may or may not be ready (data_received flag).
+ *   which means data may or may not be ready (out_received flag).
  * - Endpoint -> RAM
  *   ENDEPOUT[i] interrupted, transaction complete, sw prepare next transaction
  *
@@ -764,20 +761,16 @@ void dcd_int_handler(uint8_t rhport)
       xfer_td_t* xfer = get_td(epnum, TUSB_DIR_OUT);
       uint8_t const xact_len = NRF_USBD->EPOUT[epnum].AMOUNT;
 
+      xfer->buffer += xact_len;
+      xfer->actual_len += xact_len;
+
       // Transfer complete if transaction len < Max Packet Size or total len is transferred
       if ( (epnum != EP_ISO_NUM) && (xact_len == xfer->mps) && (xfer->actual_len < xfer->total_len) )
       {
         if ( epnum == 0 )
         {
           // Accept next Control Out packet. TASKS_EP0RCVOUT also require EasyDMA
-          if ( _dcd.dma_pending )
-          {
-            // use usbd task to defer later
-            usbd_defer_func((osal_task_func_t) start_ep0_task, (void*) (uintptr_t) &NRF_USBD->TASKS_EP0RCVOUT, true);
-          }else
-          {
-            start_ep0_task(&NRF_USBD->TASKS_EP0RCVOUT);
-          }
+          edpt_dma_start(&NRF_USBD->TASKS_EP0RCVOUT);
         }else
         {
           // nRF auto accept next Bulk/Interrupt OUT packet
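The continue/complete decision in the hunk above can be read as: a full-size OUT packet (AMOUNT == mps) with bytes still outstanding means another transaction follows, while a short packet or reaching total_len ends the transfer. A small illustration of that rule, with hypothetical values and the ISO special case omitted:

#include <stdbool.h>
#include <stdint.h>

// Illustration only, not driver code: same rule as the ENDEPOUT handler above.
static bool out_xfer_continues(uint16_t xact_len, uint16_t mps,
                               uint16_t actual_len, uint16_t total_len)
{
  // A full-size packet with bytes still outstanding means more data follows;
  // a short packet, or reaching total_len, completes the transfer.
  return (xact_len == mps) && (actual_len < total_len);
}

// Example with mps = 64 and total_len = 100:
//   1st ENDEPOUT: AMOUNT = 64, actual_len = 64  -> continues (64 == mps, 64 < 100)
//   2nd ENDEPOUT: AMOUNT = 36, actual_len = 100 -> completes (36 != mps)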
@@ -814,8 +807,10 @@ void dcd_int_handler(uint8_t rhport)
     if ( tu_bit_test(data_status, epnum) || (epnum == 0 && is_control_in) )
     {
       xfer_td_t* xfer = get_td(epnum, TUSB_DIR_IN);
+      uint8_t const xact_len = NRF_USBD->EPIN[epnum].AMOUNT;
 
-      xfer->actual_len += NRF_USBD->EPIN[epnum].MAXCNT;
+      xfer->buffer += xact_len;
+      xfer->actual_len += xact_len;
 
       if ( xfer->actual_len < xfer->total_len )
       {