author    | Jens Axboe <axboe@kernel.dk> | 2021-10-28 08:34:01 -0600
committer | Jens Axboe <axboe@kernel.dk> | 2021-10-28 08:34:01 -0600
commit    | f4aaf1fa8b17e74aa46bf0165c35a1575b239d4e
tree      | 612092bfc5cdccd1f714e2690fb22d139ec51e0f
parent    | e0c60d0102a5ad3475401e1a2faa3d3623eefce4
parent    | 86aeda32b887cdaeb0f4b7bfc9971e36377181c7
Merge tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme into block-5.15
Pull NVMe fixes from Christoph:
"nvme fixe for Linux 5.15
- fix nvmet-tcp header digest verification (Amit Engel)
- fix a memory leak in nvmet-tcp when releasing a queue
(Maurizio Lombardi)
- fix nvme-tcp H2CData PDU send accounting again (Sagi Grimberg)
- fix digest pointer calculation in nvme-tcp and nvmet-tcp
(Varun Prakash)
- fix possible nvme-tcp req->offset corruption (Varun Prakash)"
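The two digest pointer fixes above correct the same C pointer-arithmetic slip: req->ddgst (and the target's cmd->exp_ddgst) is a __le32, so &req->ddgst + req->offset advances the pointer in 4-byte strides rather than bytes, overshooting the resume position whenever a digest send was split across calls. A minimal userspace sketch of the arithmetic (illustrative only; uint32_t stands in for __le32):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ddgst = 0;	/* stands in for req->ddgst (__le32) */
	size_t offset = 2;	/* digest bytes already sent */

	/* Buggy form: steps in sizeof(uint32_t) units, so a 2-byte
	 * resume point becomes an 8-byte overshoot. */
	char *bad = (char *)(&ddgst + offset);

	/* Fixed form, as in the patches: cast to a byte pointer
	 * before adding the byte offset. */
	char *good = (char *)((uint8_t *)&ddgst + offset);

	printf("bad skips %td bytes, good skips %td bytes\n",
	       bad - (char *)&ddgst, good - (char *)&ddgst);
	return 0;
}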
* tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme:
nvmet-tcp: fix header digest verification
nvmet-tcp: fix data digest pointer calculation
nvme-tcp: fix data digest pointer calculation
nvme-tcp: fix possible req->offset corruption
nvme-tcp: fix H2CData PDU send accounting (again)
nvmet-tcp: fix a memory leak when releasing a queue
-rw-r--r-- | drivers/nvme/host/tcp.c   | 9
-rw-r--r-- | drivers/nvme/target/tcp.c | 7
2 files changed, 11 insertions(+), 5 deletions(-)
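Two of the host-side hunks in the diff below share one pattern: nvme_tcp_try_send_data() and nvme_tcp_try_send_ddgst() now snapshot req->data_len, req->data_sent, and req->offset into locals before calling into the network stack, because once the send is issued the RX path may complete the request and re-initialize those fields concurrently. A hedged sketch of the pattern (req_state and send_fn are hypothetical stand-ins, not the real driver types):

/* Snapshot-before-send: fields that a concurrent completion may
 * rewrite are copied to locals, and only the locals are consulted
 * after the send returns. */
struct req_state {
	int data_sent;	/* bytes already sent for this request */
	int data_len;	/* total payload length */
};

static int try_send(struct req_state *req, int (*send_fn)(struct req_state *))
{
	int data_sent = req->data_sent;	/* snapshot before the send */
	int data_len = req->data_len;
	int ret = send_fn(req);		/* the RX path may complete and
					 * re-initialize *req from here on */

	if (ret <= 0)
		return ret;
	/* Decide on the snapshots: the live req fields may already
	 * describe a completed and reused request. */
	if (data_sent + ret < data_len)
		return 0;	/* partial send, more to do later */
	return 1;		/* request payload fully sent */
}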
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3c1c29dd3020..4ae562d30d2b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	int req_data_len = req->data_len;
 
 	while (true) {
 		struct page *page = nvme_tcp_req_cur_page(req);
 		size_t offset = nvme_tcp_req_cur_offset(req);
 		size_t len = nvme_tcp_req_cur_length(req);
 		bool last = nvme_tcp_pdu_last_send(req, len);
+		int req_data_sent = req->data_sent;
 		int ret, flags = MSG_DONTWAIT;
 
 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		 * in the request where we don't want to modify it as we may
 		 * compete with the RX path completing the request.
 		 */
-		if (req->data_sent + ret < req->data_len)
+		if (req_data_sent + ret < req_data_len)
 			nvme_tcp_advance_req(req, ret);
 
 		/* fully successful last send in current PDU */
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	size_t offset = req->offset;
 	int ret;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &req->ddgst + req->offset,
+		.iov_base = (u8 *)&req->ddgst + req->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
 	};
 
@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 	if (unlikely(ret <= 0))
 		return ret;
 
-	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
 		nvme_tcp_done_send_req(queue);
 		return 1;
 	}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 07ee347ea3f3..46c3b3be7e03 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &cmd->exp_ddgst + cmd->offset,
+		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
 	};
 	int ret;
@@ -1096,7 +1096,7 @@ recv:
 	}
 
 	if (queue->hdr_digest &&
-	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
 		nvmet_tcp_fatal_error(queue); /* fatal */
 		return -EPROTO;
 	}
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
+	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 		nvmet_tcp_free_crypto(queue);
 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
 
+	page = virt_to_head_page(queue->pf_cache.va);
+	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 	kfree(queue);
 }
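The final target-side hunk plugs the leak: each queue allocates receive buffers from a per-queue page_frag_cache, and before this fix the cache's remaining page reference was never dropped at queue teardown. A kernel-style sketch of the idiom (the helper name is hypothetical; virt_to_head_page(), __page_frag_cache_drain(), and the va/pagecnt_bias members are the real upstream interfaces):

/* Kernel-style sketch, not standalone userspace code: release the
 * reference a page_frag_cache still holds on its backing page,
 * mirroring the nvmet_tcp_release_queue_work() change above. */
#include <linux/gfp.h>
#include <linux/mm.h>

static void drain_pf_cache(struct page_frag_cache *pfc)
{
	struct page *page;

	if (!pfc->va)	/* cache never allocated anything */
		return;

	page = virt_to_head_page(pfc->va);
	__page_frag_cache_drain(page, pfc->pagecnt_bias);
	pfc->va = NULL;	/* defensive reset, beyond what the patch does */
}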