@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
214
214
* @running: true if the channel is running
215
215
* @first_frame: flag for the first frame of stream
216
216
* @video_group: flag if multi-channel operation is needed for video channels
217
- * @lock: lock to access struct xilinx_dpdma_chan
217
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
218
+ * @vchan.lock, if both are to be held.
218
219
* @desc_pool: descriptor allocation pool
219
220
* @err_task: error IRQ bottom half handler
220
221
* @desc: References to descriptors being processed
@@ -1175,12 +1176,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
1175
1176
* Complete the active descriptor, if any, promote the pending
1176
1177
* descriptor to active, and queue the next transfer, if any.
1177
1178
*/
1179
+ spin_lock(&chan->vchan.lock);
1178
1180
if (chan->desc.active)
1179
1181
vchan_cookie_complete(&chan->desc.active->vdesc);
1180
1182
chan->desc.active = pending;
1181
1183
chan->desc.pending = NULL;
1182
1184
1183
1185
xilinx_dpdma_chan_queue_transfer(chan);
1186
+ spin_unlock(&chan->vchan.lock);
1184
1187
1185
1188
out:
1186
1189
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1359,10 +1362,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
1359
1362
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1360
1363
unsigned long flags;
1361
1364
1362
- spin_lock_irqsave(&chan->vchan.lock, flags);
1365
+ spin_lock_irqsave(&chan->lock, flags);
1366
+ spin_lock(&chan->vchan.lock);
1363
1367
if (vchan_issue_pending(&chan->vchan))
1364
1368
xilinx_dpdma_chan_queue_transfer(chan);
1365
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
1369
+ spin_unlock(&chan->vchan.lock);
1370
+ spin_unlock_irqrestore(&chan->lock, flags);
1366
1371
}
1367
1372
1368
1373
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1590,7 +1595,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
1590
1595
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
1591
1596
1592
1597
spin_lock_irqsave(&chan->lock, flags);
1598
+ spin_lock(&chan->vchan.lock);
1593
1599
xilinx_dpdma_chan_queue_transfer(chan);
1600
+ spin_unlock(&chan->vchan.lock);
1594
1601
spin_unlock_irqrestore(&chan->lock, flags);
1595
1602
}
1596
1603
0 commit comments