RDMA v6.15 first rc pull request

- Hang in bnxt_re due to miscomputing the budget

- Avoid a -Wformat-security message in dev_set_name()

- Avoid an unused definition warning in fs.c with some kconfigs

- Fix error handling in usnic and remove IS_ERR_OR_NULL() usage

- Regression in RXE support found by blktests due to missing ODP
  exclusions

- Set the dma_segment_size on HNS so it doesn't corrupt DMA when using
  very large IOs

- Move an INIT_WORK to near where the work is allocated in cm.c to fix
  a racy crash where work in progress was being init'd

- Use __GFP_NOWARN to not dump in kvcalloc() if userspace requests a
  very big MR
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCZ/0D0AAKCRCFwuHvBreF
YTe/AP4lyGMTbP7qwbgA8wSOIyYVjrdiNfP/vtvwcSp6Aw/7DgD/e3Ia3KNUQH/3
3L6gGnf58PGVjBArED7bdzZCcCrCQwM=
=XZ1j
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

 - Fix hang in bnxt_re due to miscomputing the budget

 - Avoid a -Wformat-security message in dev_set_name()

 - Avoid an unused definition warning in fs.c with some kconfigs

 - Fix error handling in usnic and remove IS_ERR_OR_NULL() usage

 - Regression in RXE support found by blktests due to missing ODP
   exclusions

 - Set the dma_segment_size on HNS so it doesn't corrupt DMA when using
   very large IOs

 - Move an INIT_WORK to near where the work is allocated in cm.c to fix
   a racy crash where work in progress was being init'd

 - Use __GFP_NOWARN to not dump in kvcalloc() if userspace requests a
   very big MR

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/bnxt_re: Remove unusable nq variable
  RDMA/core: Silence oversized kvmalloc() warning
  RDMA/cma: Fix workqueue crash in cma_netevent_work_handler
  RDMA/hns: Fix wrong maximum DMA segment size
  RDMA/rxe: Fix null pointer dereference in ODP MR check
  RDMA/mlx5: Fix compilation warning when USER_ACCESS isn't set
  RDMA/usnic: Fix passing zero to PTR_ERR in usnic_ib_pci_probe()
  RDMA/ucaps: Avoid format-security warning
  RDMA/bnxt_re: Fix budget handling of notification queue
commit 834a4a6896
@@ -72,6 +72,8 @@ static const char * const cma_events[] = {
 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);
 
+static void cma_netevent_work_handler(struct work_struct *_work);
+
 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
 {
	size_t index = event;
@@ -1047,6 +1049,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;
+	INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
 
	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
@@ -5241,7 +5244,6 @@ static int cma_netevent_callback(struct notifier_block *self,
		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
			   neigh->ha, ETH_ALEN))
			continue;
-		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
		cma_id_get(current_id);
		queue_work(cma_wq, &current_id->id.net_work);
	}
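The crash fixed by the two cma.c hunks above comes from re-running INIT_WORK() on a work item that may already be queued or executing; the fix initializes the work once, when the cm_id is created, and the netevent callback only queues it afterwards. A minimal module-style sketch of that pattern, assuming hypothetical demo_* names (this is not the cma.c code):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

static void demo_handler(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	/* Initialize exactly once, at object-creation time... */
	INIT_WORK(&demo_work, demo_handler);
	/* ...then only queue it. Repeated queue_work() is safe;
	 * repeated INIT_WORK() on a pending work item is not, since it
	 * rewrites the item's pending/list state under the workqueue. */
	queue_work(system_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");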
@@ -170,7 +170,7 @@ int ib_create_ucap(enum rdma_user_cap type)
	ucap->dev.class = &ucaps_class;
	ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type);
	ucap->dev.release = ucap_dev_release;
-	ret = dev_set_name(&ucap->dev, ucap_names[type]);
+	ret = dev_set_name(&ucap->dev, "%s", ucap_names[type]);
	if (ret)
		goto err_device;
 
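dev_set_name() is printf-style, so passing ucap_names[type] directly makes it the format string and trips -Wformat-security; routing it through a literal "%s" treats it as plain data. The same class of bug in standalone userspace C, compiled with gcc -Wformat -Wformat-security (a sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	const char *name = "ucap%n";	/* table- or user-supplied string */

	/* printf(name); -- warns: format is not a string literal, and
	 * any '%' directives inside 'name' would be interpreted. */
	printf("%s\n", name);		/* safe: 'name' is data, not format */
	return 0;
}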
@@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 
		npfns = (end - start) >> PAGE_SHIFT;
		umem_odp->pfn_list = kvcalloc(
-			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+			npfns, sizeof(*umem_odp->pfn_list),
+			GFP_KERNEL | __GFP_NOWARN);
		if (!umem_odp->pfn_list)
			return -ENOMEM;
 
		umem_odp->dma_list = kvcalloc(
-			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
+			ndmas, sizeof(*umem_odp->dma_list),
+			GFP_KERNEL | __GFP_NOWARN);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_pfn_list;
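kvcalloc() normally dumps an allocation-failure warning when a request is too large; since npfns here is derived from a userspace-sized MR, failure is an expected outcome, and __GFP_NOWARN keeps the log clean while the caller still returns -ENOMEM. A hedged sketch of the pattern, with a hypothetical demo_alloc_pfn_list():

#include <linux/mm.h>
#include <linux/slab.h>

static unsigned long *demo_alloc_pfn_list(size_t npfns)
{
	/* kvcalloc() tries kmalloc first and falls back to vmalloc for
	 * big arrays; __GFP_NOWARN stops an oversized, user-controlled
	 * request from dumping a splat before we can fail cleanly. */
	return kvcalloc(npfns, sizeof(unsigned long),
			GFP_KERNEL | __GFP_NOWARN);
}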
@@ -1774,10 +1774,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
-	struct bnxt_qplib_nq *nq = NULL;
 
-	if (qplib_srq->cq)
-		nq = qplib_srq->cq->nq;
	if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
		free_page((unsigned long)srq->uctx_srq_page);
		hash_del(&srq->hash_entry);
@@ -1785,8 +1782,6 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->stats.res.srq_count);
-	if (nq)
-		nq->budget--;
	return 0;
 }
 
@@ -1827,7 +1822,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_udata *udata)
 {
	struct bnxt_qplib_dev_attr *dev_attr;
-	struct bnxt_qplib_nq *nq = NULL;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_srq *srq;
@@ -1873,7 +1867,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
-	nq = &rdev->nqr->nq[0];
 
	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
@@ -1908,8 +1901,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
			goto fail;
		}
	}
-	if (nq)
-		nq->budget++;
	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
	if (active_srqs > rdev->stats.res.srq_watermark)
		rdev->stats.res.srq_watermark = active_srqs;
@@ -3079,7 +3070,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
	ib_umem_release(cq->umem);
 
	atomic_dec(&rdev->stats.res.cq_count);
-	nq->budget--;
	kfree(cq->cql);
	return 0;
 }
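The bnxt_re hunks above drop the verbs-layer nq->budget bookkeeping along with the now-unusable nq local. The invariant behind the hang fix is that budget accounting must be symmetric: whatever queue object is charged at create time must be credited against the same object at destroy time, or neither side should touch it. A generic C sketch of that pairing, with hypothetical demo_* types (not the bnxt_re code):

struct demo_nq {
	int budget;			/* outstanding queues on this NQ */
};

struct demo_srq {
	struct demo_nq *nq;		/* the NQ this SRQ was charged to */
};

static void demo_srq_create(struct demo_srq *srq, struct demo_nq *nq)
{
	srq->nq = nq;
	nq->budget++;			/* charge the queue actually used */
}

static void demo_srq_destroy(struct demo_srq *srq)
{
	srq->nq->budget--;		/* credit the same queue, exactly once */
}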
@@ -763,7 +763,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
		if (ret)
			return ret;
	}
-	dma_set_max_seg_size(dev, UINT_MAX);
+	dma_set_max_seg_size(dev, SZ_2G);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
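dma_set_max_seg_size() tells the DMA layer the largest scatterlist segment the device can handle; advertising UINT_MAX let very large IOs be merged into segments the hns hardware could not address, corrupting DMA, so the fix caps it at the real 2 GiB limit. A sketch of the probe-time call, with a hypothetical demo_probe_dma():

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void demo_probe_dma(struct device *dev)
{
	/* Advertise the hardware's true per-segment limit so the core
	 * never builds a scatterlist segment the device mis-addresses. */
	dma_set_max_seg_size(dev, SZ_2G);
}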
@@ -3461,7 +3461,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
	&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
 
 const struct uapi_definition mlx5_ib_flow_defs[] = {
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_FLOW_MATCHER),
	UAPI_DEF_CHAIN_OBJ_TREE(
@@ -3472,7 +3471,6 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_STEERING_ANCHOR,
		UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
-#endif
	{},
 };
 
@@ -397,7 +397,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
			  netdev_name(pci_get_drvdata(dev)));
-		return ERR_PTR(-EFAULT);
+		return NULL;
	}
 
	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
@@ -517,8 +517,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
	}
 
	us_ibdev = usnic_ib_device_add(parent_pci);
-	if (IS_ERR_OR_NULL(us_ibdev)) {
-		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+	if (!us_ibdev) {
+		us_ibdev = ERR_PTR(-EFAULT);
		goto out;
	}
 
@@ -586,10 +586,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
	}
 
	pf = usnic_ib_discover_pf(vf->vnic);
-	if (IS_ERR_OR_NULL(pf)) {
-		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
-			  pci_name(pdev), PTR_ERR(pf));
-		err = pf ? PTR_ERR(pf) : -EFAULT;
+	if (IS_ERR(pf)) {
+		err = PTR_ERR(pf);
+		usnic_err("Failed to discover pf of vnic %s with err%d\n",
+			  pci_name(pdev), err);
		goto out_clean_vnic;
	}
 
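The usnic hunks above enforce the usual err.h convention: each layer returns either a valid pointer, NULL, or an ERR_PTR()-encoded errno, but never mixes NULL and ERR_PTR at the same boundary, so callers test with IS_ERR() and never hand a non-error pointer (or zero) to PTR_ERR(). A hedged sketch of the layering, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_dev { int id; };

static struct demo_dev *demo_add(void)
{
	/* Lowest layer: NULL means allocation failure, nothing else. */
	return kzalloc(sizeof(struct demo_dev), GFP_KERNEL);
}

static struct demo_dev *demo_discover(void)
{
	struct demo_dev *d = demo_add();

	if (!d)				/* convert NULL to an error pointer once */
		return ERR_PTR(-EFAULT);
	return d;			/* from here on: valid or ERR_PTR */
}

static int demo_probe(void)
{
	struct demo_dev *d = demo_discover();

	if (IS_ERR(d))
		return PTR_ERR(d);	/* PTR_ERR() only on real errors */
	return 0;
}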
@@ -140,6 +140,12 @@ static inline int qp_mtu(struct rxe_qp *qp)
		return IB_MTU_4096;
 }
 
+static inline bool is_odp_mr(struct rxe_mr *mr)
+{
+	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+	       mr->umem->is_odp;
+}
+
 void free_rd_atomic_resource(struct resp_res *res);
 
 static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
@@ -323,7 +323,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
		return err;
	}
 
-	if (mr->umem->is_odp)
+	if (is_odp_mr(mr))
		return rxe_odp_mr_copy(mr, iova, addr, length, dir);
	else
		return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
@@ -536,7 +536,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
	u64 *va;
 
	/* ODP is not supported right now. WIP. */
-	if (mr->umem->is_odp)
+	if (is_odp_mr(mr))
		return RESPST_ERR_UNSUPPORTED_OPCODE;
 
	/* See IBA oA19-28 */
@@ -650,7 +650,7 @@ static enum resp_states process_flush(struct rxe_qp *qp,
	struct resp_res *res = qp->resp.res;
 
	/* ODP is not supported right now. WIP. */
-	if (mr->umem->is_odp)
+	if (is_odp_mr(mr))
		return RESPST_ERR_UNSUPPORTED_OPCODE;
 
	/* oA19-14, oA19-15 */
@@ -706,7 +706,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
	if (!res->replay) {
		u64 iova = qp->resp.va + qp->resp.offset;
 
-		if (mr->umem->is_odp)
+		if (is_odp_mr(mr))
			err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
						atmeth_comp(pkt),
						atmeth_swap_add(pkt),
@@ -4790,7 +4790,14 @@ void roce_del_all_netdev_gids(struct ib_device *ib_dev,
 
 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
 
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+#else
+static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+	return 0;
+}
+#endif
 
 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
				     enum rdma_netdev_t type, const char *name,
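This ib_verbs.h hunk is what lets the mlx5 fs.c hunks above drop their #if/#endif guard: when CONFIG_INFINIBAND_USER_ACCESS is off, callers now see a static inline no-op stub instead of a declaration with no definition. The general kconfig-stub idiom, sketched with hypothetical CONFIG_DEMO_FEATURE / demo_handler names:

#include <linux/kconfig.h>

struct demo_ctx;

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
int demo_handler(struct demo_ctx *ctx);	/* real implementation elsewhere */
#else
static inline int demo_handler(struct demo_ctx *ctx)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif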