mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/
synced 2025-04-19 20:58:31 +09:00
block-6.14-20250306
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmfKQvsQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpnBCD/9bVSGHNnXakVwdpQmtU5zy54cyWd7VaYsz
qeM+Vrl1m5nf8q5ZdEXcM11Ruib3YJiW0GN9d9sWpTwt8C5n8g+8F63koS7GordZ
jcv77nO6FlnwWpm3YlNxAeLuxkl15e4MQIKj/jb540iFygzT8H2lygE816K4kpCX
XuMxNxdSMksntovZufzxo3Sfkm6e6GChCkkqvBxuXiEWFhvbFQ/ZLEsEMtoH4hkI
3Nj1VB3B3pLVCZhWr2uVvcZCiYUDyBslu+SA3RRoX0W6beK1cVI4OQdS8GtnkJf3
qFnLQz0Ib3EVDtugqg7ZGSAAov6Z8waA2MrFeZkG8uIfl4WT3kBfoan7jRX3Mknl
VnFkThyJOzB83OKqlZKjCzYmEzBhKJrRJVtneIrxT+gvEpevFvAQil6SQfyPDwno
4YcUD+IfU/daTdVR58QQ/iLzkQ7stQWYCtZSrICKfcAGy6zswKM5P5uoWltMBwQh
aHsyz9xbmsMrxch1DPRb0T2GD2h9BsiL6rT8JCrOgucMuOYeZL9pNRgz16D/hael
wBCxPcanSdap0N9kiMX8fLYYdmRxpJHzTbeNRsPhZe8HKUPu1sYTbisOou1XSdAW
Dv7zeQWVlw+1cn/S1Y6Oc4mdlPzPTA9szuBXVpbe9Gd7ZqO7sbbKEkGu5w6MGSZ1
oubnZKCNvA==
=jKDe
-----END PGP SIGNATURE-----

Merge tag 'block-6.14-20250306' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - TCP use after free fix on polling (Sagi)
      - Controller memory buffer cleanup fixes (Icenowy)
      - Free leaking requests on bad user passthrough commands (Keith)
      - TCP error message fix (Maurizio)
      - TCP corruption fix on partial PDU (Maurizio)
      - TCP memory ordering fix for weakly ordered archs (Meir)
      - Type coercion fix on message error for TCP (Dan)

 - Name the RQF flags enum, fixing issues with anon enums and BPF
   import of it

 - ublk parameter setting fix

 - GPT partition 7-bit conversion fix

* tag 'block-6.14-20250306' of git://git.kernel.dk/linux:
  block: Name the RQF flags enum
  nvme-tcp: fix signedness bug in nvme_tcp_init_connection()
  block: fix conversion of GPT partition name to 7-bit
  ublk: set_params: properly check if parameters can be applied
  nvmet-tcp: Fix a possible sporadic response drops in weakly ordered arch
  nvme-tcp: fix potential memory corruption in nvme_tcp_recv_pdu()
  nvme-tcp: Fix a C2HTermReq error message
  nvmet: remove old function prototype
  nvme-ioctl: fix leaked requests on mapping error
  nvme-pci: skip CMB blocks incompatible with PCI P2P DMA
  nvme-pci: clean up CMBMSC when registering CMB fails
  nvme-tcp: fix possible UAF in nvme_tcp_poll
This commit is contained in commit 381af8d9f4.
block/partitions/efi.c
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
 	out[size] = 0;
 
 	while (i < size) {
-		u8 c = le16_to_cpu(in[i]) & 0xff;
+		u8 c = le16_to_cpu(in[i]) & 0x7f;
 
 		if (c && !isprint(c))
			c = '!';
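A note on why this one-character change matters, with a standalone sketch in plain userspace C (not kernel code): masking a UTF-16LE code unit with 0xff preserves bit 7, so values above 0x7f survive into a buffer that the function's name promises is 7-bit ASCII, while 0x7f clamps every output byte to 0..127 by construction.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t code_unit = 0x00e9;		/* 'é' as a UTF-16LE code unit */

	uint8_t with_ff = code_unit & 0xff;	/* 0xe9: high bit leaks through */
	uint8_t with_7f = code_unit & 0x7f;	/* 0x69: 7-bit by construction */

	printf("& 0xff -> 0x%02x, & 0x7f -> 0x%02x\n", with_ff, with_7f);
	return 0;
}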
drivers/block/ublk_drv.c
@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
 	if (ph.len > sizeof(struct ublk_params))
 		ph.len = sizeof(struct ublk_params);
 
-	/* parameters can only be changed when device isn't live */
 	mutex_lock(&ub->mutex);
-	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+	if (test_bit(UB_STATE_USED, &ub->state)) {
+		/*
+		 * Parameters can only be changed when device hasn't
+		 * been started yet
+		 */
 		ret = -EACCES;
 	} else if (copy_from_user(&ub->params, argp, ph.len)) {
 		ret = -EFAULT;
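The gating change above is subtle: UBLK_S_DEV_LIVE is a transient state value, while UB_STATE_USED is a sticky bit recording that the device has been started at all. The real ublk state machine is more involved than this, but a minimal sketch with illustrative names (not the driver's structures) shows why a sticky flag is the safer guard:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative device model, not ublk's actual structures. */
struct dev {
	bool used;	/* set the moment the device is started */
	int state;	/* becomes LIVE only once startup completes */
};
#define DEV_LIVE 1

static bool may_set_params_old(const struct dev *d) { return d->state != DEV_LIVE; }
static bool may_set_params_new(const struct dev *d) { return !d->used; }

int main(void)
{
	/* Mid-startup window: already in use, but not yet marked LIVE. */
	struct dev d = { .used = true, .state = 0 };

	printf("old check allows change: %d (the bug)\n", may_set_params_old(&d));
	printf("new check allows change: %d\n", may_set_params_new(&d));
	return 0;
}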
drivers/nvme/host/ioctl.c
@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	if (!nvme_ctrl_sgl_supported(ctrl))
 		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
 	if (has_metadata) {
-		if (!supports_metadata)
-			return -EINVAL;
+		if (!supports_metadata) {
+			ret = -EINVAL;
+			goto out;
+		}
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
-			return -EINVAL;
+		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+			ret = -EINVAL;
+			goto out;
+		}
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
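Both hunks replace early returns with goto out because, by the time these checks run, the request already owns resources that must be unwound on failure; a bare return leaks them. Here is a self-contained sketch of the single-exit pattern (acquire_thing/release_thing are illustrative stand-ins, not the driver's helpers):

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

/* Stand-ins for a resource that must be released on every path. */
struct thing { int dummy; };

static struct thing *acquire_thing(void) { return malloc(sizeof(struct thing)); }
static void release_thing(struct thing *t) { free(t); }
static int risky_setup(struct thing *t) { (void)t; return -EINVAL; /* simulate failure */ }

static int do_work(void)
{
	struct thing *t;
	int ret;

	t = acquire_thing();
	if (!t)
		return -ENOMEM;	/* nothing acquired yet: early return is safe */

	ret = risky_setup(t);
	if (ret)
		goto out;	/* a bare "return ret" here would leak t */

	ret = 0;
out:
	release_thing(t);	/* single unwind point for every exit path */
	return ret;
}

int main(void)
{
	printf("do_work() = %d\n", do_work());
	return 0;
}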
drivers/nvme/host/pci.c
@@ -1982,6 +1982,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (offset > bar_size)
 		return;
 
+	/*
+	 * Controllers may support a CMB size larger than their BAR, for
+	 * example, due to being behind a bridge. Reduce the CMB to the
+	 * reported size of the BAR
+	 */
+	size = min(size, bar_size - offset);
+
+	if (!IS_ALIGNED(size, memremap_compat_align()) ||
+	    !IS_ALIGNED(pci_resource_start(pdev, bar),
+			memremap_compat_align()))
+		return;
+
 	/*
 	 * Tell the controller about the host side address mapping the CMB,
 	 * and enable CMB decoding for the NVMe 1.4+ scheme:
@@ -1992,17 +2004,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 			dev->bar + NVME_REG_CMBMSC);
 	}
 
-	/*
-	 * Controllers may support a CMB size larger than their BAR,
-	 * for example, due to being behind a bridge. Reduce the CMB to
-	 * the reported size of the BAR
-	 */
-	if (size > bar_size - offset)
-		size = bar_size - offset;
-
 	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
 		dev_warn(dev->ctrl.device,
 			 "failed to register the CMB\n");
+		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
 		return;
 	}
 
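The new check refuses to export a CMB whose size or BAR start is not aligned to memremap_compat_align(), since PCI P2PDMA memremaps the region. Below is a userspace sketch of the kernel's IS_ALIGNED() test; the 2 MiB figure is an assumption for illustration only (the real value is architecture- and config-dependent):

#include <stdio.h>
#include <stdint.h>

/* Same bit trick as the kernel macro; alignment must be a power of two. */
#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t align = 2 * 1024 * 1024;	/* assumed 2 MiB compat alignment */
	uint64_t good = 0x40000000;		/* 1 GiB: a multiple of 2 MiB */
	uint64_t bad  = 0x40001000;		/* 1 GiB + 4 KiB: not aligned */

	printf("good: %d, bad: %d\n",
	       (int)IS_ALIGNED(good, align), (int)IS_ALIGNED(bad, align));
	return 0;
}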
drivers/nvme/host/tcp.c
@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
 	return queue - queue->ctrl->queues;
 }
 
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+	switch (type) {
+	case nvme_tcp_c2h_term:
+	case nvme_tcp_c2h_data:
+	case nvme_tcp_r2t:
+	case nvme_tcp_rsp:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /*
  * Check if the queue is TLS encrypted
  */
@@ -775,7 +788,7 @@ static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
 		[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
 		[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
 		[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
-		[NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+		[NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
 		[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
 	};
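The message table is a sparse array indexed by the wire FES status code, so each designated initializer's name has to match what that code means for this PDU direction — which is all the one-line fix changes. A generic sketch of the lookup pattern (simplified, not the driver's exact code):

#include <stdio.h>

#define FES_DATA_LIMIT_EXCEEDED 0x05

static const char * const msg_table[] = {
	[0x01] = "Invalid PDU Header Field",
	[FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
};

static const char *fes_msg(unsigned int fes)
{
	/* bounds + NULL check: sparse designated-initializer tables have holes */
	if (fes < sizeof(msg_table) / sizeof(msg_table[0]) && msg_table[fes])
		return msg_table[fes];
	return "Unknown";
}

int main(void)
{
	printf("%s\n", fes_msg(FES_DATA_LIMIT_EXCEEDED));
	printf("%s\n", fes_msg(0x7f));
	return 0;
}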
@@ -818,6 +831,16 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		return 0;
 
 	hdr = queue->pdu;
+	if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+		if (!nvme_tcp_recv_pdu_supported(hdr->type))
+			goto unsupported_pdu;
+
+		dev_err(queue->ctrl->ctrl.device,
+			"pdu type %d has unexpected header length (%d)\n",
+			hdr->type, hdr->hlen);
+		return -EPROTO;
+	}
+
 	if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
 		/*
 		 * C2HTermReq never includes Header or Data digests.
@@ -850,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		nvme_tcp_init_recv_ctx(queue);
 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
 	default:
-		dev_err(queue->ctrl->ctrl.device,
-			"unsupported pdu type (%d)\n", hdr->type);
-		return -EINVAL;
+		goto unsupported_pdu;
 	}
+
+unsupported_pdu:
+	dev_err(queue->ctrl->ctrl.device,
+		"unsupported pdu type (%d)\n", hdr->type);
+	return -EINVAL;
 }
 
 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
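The corruption fix boils down to one rule: never trust a peer-supplied header length until it has been checked against the fixed-size buffer the PDU was received into. A generic, self-contained sketch of that defensive pattern (simplified framing, not the actual NVMe/TCP structures):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PDU_BUF_SIZE 24		/* fixed receive buffer, as for a response PDU */

static int recv_pdu(const uint8_t *wire, size_t wire_len)
{
	uint8_t pdu[PDU_BUF_SIZE];
	uint8_t claimed_hlen;

	if (wire_len < 2)
		return -1;
	claimed_hlen = wire[1];	/* header length field supplied by the peer */

	/* Validate before use: an unchecked hlen larger than the buffer is
	 * exactly the class of corruption the fix closes off. */
	if (claimed_hlen != PDU_BUF_SIZE || claimed_hlen > wire_len)
		return -1;

	memcpy(pdu, wire, claimed_hlen);
	return pdu[0];		/* PDU type, now safe to interpret */
}

int main(void)
{
	uint8_t evil[64] = { 0x05, 0xff };	/* claims a 255-byte header */

	printf("evil pdu accepted? %s\n",
	       recv_pdu(evil, sizeof(evil)) < 0 ? "no" : "yes");
	return 0;
}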
@@ -1495,11 +1521,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
 	msg.msg_flags = MSG_WAITALL;
 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
 			iov.iov_len, msg.msg_flags);
-	if (ret < sizeof(*icresp)) {
+	if (ret >= 0 && ret < sizeof(*icresp))
+		ret = -ECONNRESET;
+	if (ret < 0) {
 		pr_warn("queue %d: failed to receive icresp, error %d\n",
 			nvme_tcp_queue_id(queue), ret);
-		if (ret >= 0)
-			ret = -ECONNRESET;
 		goto free_icresp;
 	}
 	ret = -ENOTCONN;
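The signedness fix deserves a standalone illustration: kernel_recvmsg() can return a negative errno in an int, but sizeof(*icresp) has type size_t, so the old comparison converted the negative value to a huge unsigned one and the error branch was skipped. A minimal demonstration (the 128-byte struct is a stand-in, not the real icresp layout):

#include <stdio.h>

struct icresp { char bytes[128]; };	/* stand-in for the real response */

int main(void)
{
	int ret = -104;			/* e.g. -ECONNRESET from a recv */

	if (ret < sizeof(struct icresp))	/* int converts to size_t: false! */
		printf("error path taken\n");
	else
		printf("error missed: %d compares as %zu\n", ret, (size_t)ret);
	return 0;
}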
@@ -2699,6 +2725,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
 	struct nvme_tcp_queue *queue = hctx->driver_data;
 	struct sock *sk = queue->sock->sk;
+	int ret;
 
 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
 		return 0;
@@ -2706,9 +2733,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
 		sk_busy_loop(sk, true);
-	nvme_tcp_try_recv(queue);
+	ret = nvme_tcp_try_recv(queue);
 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
-	return queue->nr_cqe;
+	return ret < 0 ? ret : queue->nr_cqe;
 }
 
 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
drivers/nvme/target/nvmet.h
@@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 		struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE 16
 #define NVMET_MAX_QUEUE_SIZE 1024
drivers/nvme/target/tcp.c
@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 	struct nvmet_tcp_cmd *cmd =
 		container_of(req, struct nvmet_tcp_cmd, req);
 	struct nvmet_tcp_queue *queue = cmd->queue;
+	enum nvmet_tcp_recv_state queue_state;
+	struct nvmet_tcp_cmd *queue_cmd;
 	struct nvme_sgl_desc *sgl;
 	u32 len;
 
-	if (unlikely(cmd == queue->cmd)) {
+	/* Pairs with store_release in nvmet_prepare_receive_pdu() */
+	queue_state = smp_load_acquire(&queue->rcv_state);
+	queue_cmd = READ_ONCE(queue->cmd);
+
+	if (unlikely(cmd == queue_cmd)) {
 		sgl = &cmd->req.cmd->common.dptr.sgl;
 		len = le32_to_cpu(sgl->length);
 
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 		 * Avoid using helpers, this might happen before
 		 * nvmet_req_init is completed.
 		 */
-		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+		if (queue_state == NVMET_TCP_RECV_PDU &&
 		    len && len <= cmd->req.port->inline_data_size &&
 		    nvme_is_write(cmd->req.cmd))
 			return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
 {
 	queue->offset = 0;
 	queue->left = sizeof(struct nvme_tcp_hdr);
-	queue->cmd = NULL;
-	queue->rcv_state = NVMET_TCP_RECV_PDU;
+	WRITE_ONCE(queue->cmd, NULL);
+	/* Ensure rcv_state is visible only after queue->cmd is set */
+	smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
 }
 
 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
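The ordering bug and its fix are easiest to see outside the kernel. The sketch below uses C11 atomics as stand-ins for the kernel's WRITE_ONCE()/smp_store_release() on the writer side and smp_load_acquire()/READ_ONCE() on the reader side: once the reader observes the released rcv_state, it is guaranteed to also observe the cmd written before it — the guarantee a weakly ordered CPU was not giving the unannotated code.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct cmd { int id; };

static _Atomic(struct cmd *) queue_cmd;
static _Atomic int queue_rcv_state;	/* 0 = busy, 1 = RECV_PDU */

static void prepare_receive_pdu(void)		/* writer side */
{
	atomic_store_explicit(&queue_cmd, NULL, memory_order_relaxed);
	/* release: the store above is visible before rcv_state changes */
	atomic_store_explicit(&queue_rcv_state, 1, memory_order_release);
}

static int queue_response(struct cmd *cmd)	/* reader side */
{
	/* acquire pairs with the release store in prepare_receive_pdu() */
	int state = atomic_load_explicit(&queue_rcv_state, memory_order_acquire);
	struct cmd *qcmd = atomic_load_explicit(&queue_cmd, memory_order_relaxed);

	/* Without the pairing, a weakly ordered CPU could observe the new
	 * state together with a stale qcmd and take the wrong path. */
	return state == 1 && cmd == qcmd;
}

int main(void)
{
	struct cmd c = { 1 };

	prepare_receive_pdu();
	printf("fast path? %d\n", queue_response(&c));	/* 0: cmd != NULL */
	return 0;
}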
include/linux/blk-mq.h
@@ -28,7 +28,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
 typedef __u32 __bitwise req_flags_t;
 
 /* Keep rqf_name[] in sync with the definitions below */
-enum {
+enum rqf_flags {
 	/* drive already may have started this one */
 	__RQF_STARTED,
 	/* request for flush sequence */
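Why naming the enum matters: the constants of an anonymous enum still exist, but the type itself cannot be referred to, and it surfaces in debug info (BTF, which BPF CO-RE tooling consumes) without a stable name. A small illustration with made-up names (the kernel's enum is rqf_flags and its values are the __RQF_* constants):

enum {				/* anonymous: constants usable, type unnameable */
	ANON_A,
	ANON_B,
};

enum rqf_flags_demo {		/* named: "enum rqf_flags_demo" is a real type */
	DEMO_STARTED,
	DEMO_FLUSH_SEQ,
};

/* Only the named form can be used as a type elsewhere. */
static enum rqf_flags_demo current_flag = DEMO_STARTED;

int main(void)
{
	return (int)ANON_A + (int)current_flag;	/* 0 */
}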