@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
 	return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		flush_workqueue(bnxt_pf_wq);
+	else
+		cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
 	if (!rxr->bnapi->in_reset) {
 		rxr->bnapi->in_reset = true;
 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 	rxr->rx_next_cons = 0xffff;
 }
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	default:
 		goto async_event_process_exit;
 	}
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 async_event_process_exit:
 	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 		break;
 
 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -6647,7 +6665,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		vnic->rx_mask = mask;
 
 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 }
 
@@ -6920,7 +6938,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
 
 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6970,7 @@ static void bnxt_timer(unsigned long data)
 	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
 	    bp->stats_coal_ticks) {
 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7433,7 +7451,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	spin_unlock_bh(&bp->ntp_fltr_lock);
 
 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 
 	return new_fltr->sw_id;
 
@@ -7516,7 +7534,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		if (bp->vxlan_port_cnt == 1) {
 			bp->vxlan_port = ti->port;
 			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-			schedule_work(&bp->sp_task);
+			bnxt_queue_sp_work(bp);
 		}
 		break;
 	case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7551,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7590,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7738,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	pci_disable_pcie_error_reporting(pdev);
 	unregister_netdev(dev);
 	bnxt_shutdown_tc(bp);
-	cancel_work_sync(&bp->sp_task);
+	bnxt_cancel_sp_work(bp);
 	bp->sp_event = 0;
 
 	bnxt_clear_int_mode(bp);
@@ -8138,8 +8156,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		device_set_wakeup_capable(&pdev->dev, false);
 
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
+		if (!bnxt_pf_wq) {
+			bnxt_pf_wq =
+				create_singlethread_workqueue("bnxt_pf_wq");
+			if (!bnxt_pf_wq) {
+				dev_err(&pdev->dev, "Unable to create workqueue.\n");
+				goto init_err_pci_clean;
+			}
+		}
 		bnxt_init_tc(bp);
+	}
 
 	rc = register_netdev(dev);
 	if (rc)
@@ -8375,4 +8402,17 @@ static struct pci_driver bnxt_pci_driver = {
 #endif
 };
 
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+	return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+	pci_unregister_driver(&bnxt_pci_driver);
+	if (bnxt_pf_wq)
+		destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
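
For reference, below is a minimal standalone sketch of the pattern this patch adopts: a dedicated single-threaded workqueue created at module init, work queued to it instead of the shared system workqueue, and the queue destroyed at module exit. This is not code from the patch; names such as demo_wq, demo_task, and demo_fn are illustrative only.

// Minimal sketch of the dedicated-workqueue pattern (illustrative names).
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran on the dedicated workqueue\n");
}

static DECLARE_WORK(demo_task, demo_fn);

static int __init demo_init(void)
{
	/* Single-threaded queue: items queued here never run concurrently,
	 * and they do not compete with work on the system workqueue. */
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_task);
	return 0;
}

static void __exit demo_exit(void)
{
	/* destroy_workqueue() drains pending work before freeing the queue. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors the diff above: the PF path queues bp->sp_task onto its own bnxt_pf_wq via queue_work(), while VFs keep using schedule_work() on the system workqueue, and bnxt_exit() tears the queue down with destroy_workqueue().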