@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                 struct mlx5_core_srq *msrq = NULL;

                 if (qp->ibqp.xrcd) {
-                        msrq = mlx5_core_get_srq(&dev->mdev,
+                        msrq = mlx5_core_get_srq(dev->mdev,
                                                  be32_to_cpu(cqe->srqn));
                         srq = to_mibsrq(msrq);
                 } else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,

 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-        mlx5_buf_free(&dev->mdev, &buf->buf);
+        mlx5_buf_free(dev->mdev, &buf->buf);
 }

 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                  * because CQs will be locked while QPs are removed
                  * from the table.
                  */
-                mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
                 if (unlikely(!mqp)) {
                         mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                                      cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
         case MLX5_CQE_SIG_ERR:
                 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

-                read_lock(&dev->mdev.priv.mr_table.lock);
-                mmr = __mlx5_mr_lookup(&dev->mdev,
+                read_lock(&dev->mdev->priv.mr_table.lock);
+                mmr = __mlx5_mr_lookup(dev->mdev,
                                        mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                 if (unlikely(!mmr)) {
-                        read_unlock(&dev->mdev.priv.mr_table.lock);
+                        read_unlock(&dev->mdev->priv.mr_table.lock);
                         mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                      cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                         return -EINVAL;
@@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);

-                read_unlock(&dev->mdev.priv.mr_table.lock);
+                read_unlock(&dev->mdev->priv.mr_table.lock);
                 goto repoll;
         }

@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
         mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                     (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                     MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+                    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));

         return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
         int err;

-        err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
                              PAGE_SIZE * 2, &buf->buf);
         if (err)
                 return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
         int err;

-        err = mlx5_db_alloc(&dev->mdev, &cq->db);
+        err = mlx5_db_alloc(dev->mdev, &cq->db);
         if (err)
                 return err;

@@ -716,22 +716,22 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

         (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-        *index = dev->mdev.priv.uuari.uars[0].index;
+        *index = dev->mdev->priv.uuari.uars[0].index;

         return 0;

 err_buf:
         free_cq_buf(dev, &cq->buf);

 err_db:
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
         return err;
 }

 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
         free_cq_buf(dev, &cq->buf);
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
 }

 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                 return ERR_PTR(-EINVAL);

         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes)
+        if (entries > dev->mdev->caps.max_cqes)
                 return ERR_PTR(-EINVAL);

         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         cqb->ctx.c_eqn = cpu_to_be16(eqn);
         cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

-        err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
         if (err)
                 goto err_cqb;

@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         return &cq->ibcq;

 err_cmd:
-        mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

 err_cqb:
         mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
         if (cq->uobject)
                 context = cq->uobject->context;

-        mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
         if (context)
                 destroy_cq_user(mcq, context);
         else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         int err;
         u32 fsel;

-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
                 return -ENOSYS;

         in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         in->ctx.cq_period = cpu_to_be16(cq_period);
         in->ctx.cq_max_count = cpu_to_be16(cq_count);
         in->field_select = cpu_to_be32(fsel);
-        err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+        err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
         kfree(in);

         if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         int uninitialized_var(cqe_size);
         unsigned long flags;

-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
                 pr_info("Firmware does not support resize CQ\n");
                 return -ENOSYS;
         }
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                 return -EINVAL;

         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes + 1)
+        if (entries > dev->mdev->caps.max_cqes + 1)
                 return -EINVAL;

         if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
         in->cqn = cpu_to_be32(cq->mcq.cqn);

-        err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
         if (err)
                 goto ex_alloc;

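Every hunk above applies the same mechanical conversion: mlx5_ib_dev is assumed to carry a pointer to the core device rather than embedding it, so &dev->mdev becomes dev->mdev and dev->mdev.priv becomes dev->mdev->priv. A minimal sketch of the assumed structure change (the struct itself is not shown in these hunks, so the other members are illustrative only):

/* Sketch, not part of this diff: the accessor pattern above implies */
struct mlx5_ib_dev {
        struct ib_device        ib_dev;
        struct mlx5_core_dev    *mdev;  /* was: struct mlx5_core_dev mdev; */
        /* remaining members unchanged */
};

With mdev a pointer, core helpers such as mlx5_db_alloc() and mlx5_core_create_cq() take dev->mdev directly, and driver-private state is reached through dev->mdev->priv.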