Skip to content

Commit 9603b61

Browse files
Jack Morgenstein authored and davem330 committed
mlx5: Move pci device handling from mlx5_ib to mlx5_core
In preparation for a new mlx5 device which is VPI (i.e., ports can be either IB or ETH), move the pci device functionality from mlx5_ib to mlx5_core. This involves the following changes: 1. Move mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev is now an independent structure maintained by mlx5_core. mlx5_ib_dev now has a pointer to that struct. This requires changing a lot of places where the core_dev struct was accessed via mlx5_ib_dev (now, this needs to be a pointer dereference). 2. All PCI initializations are now done in mlx5_core. Thus, it is now mlx5_core which does pci_register_device (and not mlx5_ib, as was previously). 3. mlx5_ib now registers itself with mlx5_core as an "interface" driver. This is very similar to the mechanism employed for the mlx4 (ConnectX) driver. Once the HCA is initialized (by mlx5_core), it invokes the interface drivers to do their initializations. 4. There is a new event handler which the core registers: mlx5_core_event(). This event handler invokes the event handlers registered by the interfaces. Based on a patch by Eli Cohen <[email protected]> Signed-off-by: Jack Morgenstein <[email protected]> Signed-off-by: Eli Cohen <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 4ada97a commit 9603b61

File tree

9 files changed

+498
-310
lines changed

9 files changed

+498
-310
lines changed

drivers/infiniband/hw/mlx5/cq.c

Lines changed: 23 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
180180
struct mlx5_core_srq *msrq = NULL;
181181

182182
if (qp->ibqp.xrcd) {
183-
msrq = mlx5_core_get_srq(&dev->mdev,
183+
msrq = mlx5_core_get_srq(dev->mdev,
184184
be32_to_cpu(cqe->srqn));
185185
srq = to_mibsrq(msrq);
186186
} else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
364364

365365
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
366366
{
367-
mlx5_buf_free(&dev->mdev, &buf->buf);
367+
mlx5_buf_free(dev->mdev, &buf->buf);
368368
}
369369

370370
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
450450
* because CQs will be locked while QPs are removed
451451
* from the table.
452452
*/
453-
mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
453+
mqp = __mlx5_qp_lookup(dev->mdev, qpn);
454454
if (unlikely(!mqp)) {
455455
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
456456
cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
514514
case MLX5_CQE_SIG_ERR:
515515
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
516516

517-
read_lock(&dev->mdev.priv.mr_table.lock);
518-
mmr = __mlx5_mr_lookup(&dev->mdev,
517+
read_lock(&dev->mdev->priv.mr_table.lock);
518+
mmr = __mlx5_mr_lookup(dev->mdev,
519519
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
520520
if (unlikely(!mmr)) {
521-
read_unlock(&dev->mdev.priv.mr_table.lock);
521+
read_unlock(&dev->mdev->priv.mr_table.lock);
522522
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
523523
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
524524
return -EINVAL;
@@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
536536
mr->sig->err_item.expected,
537537
mr->sig->err_item.actual);
538538

539-
read_unlock(&dev->mdev.priv.mr_table.lock);
539+
read_unlock(&dev->mdev->priv.mr_table.lock);
540540
goto repoll;
541541
}
542542

@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
575575
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
576576
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
577577
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
578-
to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
579-
MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
578+
to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
579+
MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
580580

581581
return 0;
582582
}
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
586586
{
587587
int err;
588588

589-
err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
589+
err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
590590
PAGE_SIZE * 2, &buf->buf);
591591
if (err)
592592
return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
691691
{
692692
int err;
693693

694-
err = mlx5_db_alloc(&dev->mdev, &cq->db);
694+
err = mlx5_db_alloc(dev->mdev, &cq->db);
695695
if (err)
696696
return err;
697697

@@ -716,22 +716,22 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
716716
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
717717

718718
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
719-
*index = dev->mdev.priv.uuari.uars[0].index;
719+
*index = dev->mdev->priv.uuari.uars[0].index;
720720

721721
return 0;
722722

723723
err_buf:
724724
free_cq_buf(dev, &cq->buf);
725725

726726
err_db:
727-
mlx5_db_free(&dev->mdev, &cq->db);
727+
mlx5_db_free(dev->mdev, &cq->db);
728728
return err;
729729
}
730730

731731
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
732732
{
733733
free_cq_buf(dev, &cq->buf);
734-
mlx5_db_free(&dev->mdev, &cq->db);
734+
mlx5_db_free(dev->mdev, &cq->db);
735735
}
736736

737737
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
752752
return ERR_PTR(-EINVAL);
753753

754754
entries = roundup_pow_of_two(entries + 1);
755-
if (entries > dev->mdev.caps.max_cqes)
755+
if (entries > dev->mdev->caps.max_cqes)
756756
return ERR_PTR(-EINVAL);
757757

758758
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
789789
cqb->ctx.c_eqn = cpu_to_be16(eqn);
790790
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
791791

792-
err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
792+
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
793793
if (err)
794794
goto err_cqb;
795795

@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
809809
return &cq->ibcq;
810810

811811
err_cmd:
812-
mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
812+
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
813813

814814
err_cqb:
815815
mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
834834
if (cq->uobject)
835835
context = cq->uobject->context;
836836

837-
mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
837+
mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
838838
if (context)
839839
destroy_cq_user(mcq, context);
840840
else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
919919
int err;
920920
u32 fsel;
921921

922-
if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
922+
if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
923923
return -ENOSYS;
924924

925925
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
931931
in->ctx.cq_period = cpu_to_be16(cq_period);
932932
in->ctx.cq_max_count = cpu_to_be16(cq_count);
933933
in->field_select = cpu_to_be32(fsel);
934-
err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
934+
err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
935935
kfree(in);
936936

937937
if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
10741074
int uninitialized_var(cqe_size);
10751075
unsigned long flags;
10761076

1077-
if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
1077+
if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
10781078
pr_info("Firmware does not support resize CQ\n");
10791079
return -ENOSYS;
10801080
}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
10831083
return -EINVAL;
10841084

10851085
entries = roundup_pow_of_two(entries + 1);
1086-
if (entries > dev->mdev.caps.max_cqes + 1)
1086+
if (entries > dev->mdev->caps.max_cqes + 1)
10871087
return -EINVAL;
10881088

10891089
if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
11281128
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
11291129
in->cqn = cpu_to_be32(cq->mcq.cqn);
11301130

1131-
err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
1131+
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
11321132
if (err)
11331133
goto ex_alloc;
11341134

drivers/infiniband/hw/mlx5/mad.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
5454
if (ignore_bkey || !in_wc)
5555
op_modifier |= 0x2;
5656

57-
return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
57+
return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
5858
}
5959

6060
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
129129

130130
packet_error = be16_to_cpu(out_mad->status);
131131

132-
dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
132+
dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
133133
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
134134

135135
out:

0 commit comments

Comments (0)