From f69a61bde0e2d72021fd3c609fd4b62edc8f8951 Mon Sep 17 00:00:00 2001
From: David Marchand <david.marchand@redhat.com>
Date: Thu, 16 Jun 2022 16:46:50 +0200
Subject: [PATCH] vhost/crypto: fix build with GCC 12
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[ upstream commit 4414bb67010dfec2559af52efe8f479b26d55447 ]

GCC 12 raises the following warning:

In file included from ../lib/mempool/rte_mempool.h:46,
                 from ../lib/mbuf/rte_mbuf.h:38,
                 from ../lib/vhost/vhost_crypto.c:7:
../lib/vhost/vhost_crypto.c: In function ‘rte_vhost_crypto_fetch_requests’:
../lib/eal/x86/include/rte_memcpy.h:371:9: warning: array subscript 1 is
    outside array bounds of ‘struct virtio_crypto_op_data_req[1]’
    [-Warray-bounds]
  371 |         rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
      |         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
../lib/vhost/vhost_crypto.c:1178:42: note: while referencing ‘req’
 1178 |         struct virtio_crypto_op_data_req req;
      |                                          ^~~

Split this function and separate the per-descriptor copy.
This makes the code clearer, and the compiler happier; a simplified
sketch of the resulting structure follows the diffstat below.

Note: logs for errors have been moved to callers to avoid duplicates.

Fixes: 3c79609fda7c ("vhost/crypto: handle virtually non-contiguous buffers")
Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/vhost_crypto.c | 123 +++++++++++++++------------------------
1 file changed, 46 insertions(+), 77 deletions(-)
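
Before reading the diff, here is a minimal, self-contained sketch of the structure the patch moves to. The names (struct desc, copy_from_one_desc) and the plain host pointer are hypothetical stand-ins for the vhost descriptor type and the IOVA_TO_VVA() mapping used in the real code; the point is only the shape of the split: an inner helper copies from a single descriptor and reports how many bytes it copied, while the outer loop walks the descriptor array until the requested size has been gathered, returning -1 and leaving error logging to the caller.

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for a vhost descriptor; the real code maps
 * desc->addr through IOVA_TO_VVA(), possibly in several pieces. */
struct desc {
	const uint8_t *addr;
	uint32_t len;
};

/* Copy from one descriptor, at most 'size' bytes.
 * Returns the number of bytes copied, or 0 on error. */
static uint32_t
copy_from_one_desc(void *dst, const struct desc *d, uint32_t size)
{
	uint32_t n = d->len < size ? d->len : size;

	if (d->addr == NULL || n == 0)
		return 0;
	memcpy(dst, d->addr, n);
	return n;
}

/* Gather 'size' bytes from up to 'max_n' descriptors starting at 'head'. */
static int
copy_data(void *dst, const struct desc *head, uint32_t max_n, uint32_t size)
{
	const struct desc *d = head;
	uint32_t left = size;

	do {
		uint32_t copied = copy_from_one_desc(dst, d, left);

		if (copied == 0)
			return -1; /* caller logs "Incorrect virtio descriptor" */
		left -= copied;
		dst = (uint8_t *)dst + copied;
		d++;
	} while (d < head + max_n && left != 0);

	return left == 0 ? 0 : -1;
}
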
diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index 926b5c0bd9..293960d350 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -565,94 +565,58 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
return data;
}
-static __rte_always_inline int
-copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
- struct vhost_crypto_desc *head,
- struct vhost_crypto_desc **cur_desc,
- uint32_t size, uint32_t max_n_descs)
+static __rte_always_inline uint32_t
+copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
+ struct vhost_crypto_desc *desc, uint32_t size)
{
- struct vhost_crypto_desc *desc = *cur_desc;
- uint64_t remain, addr, dlen, len;
- uint32_t to_copy;
- uint8_t *data = dst_data;
- uint8_t *src;
- int left = size;
-
- to_copy = RTE_MIN(desc->len, (uint32_t)left);
- dlen = to_copy;
- src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
- VHOST_ACCESS_RO);
- if (unlikely(!src || !dlen))
- return -1;
+ uint64_t remain;
+ uint64_t addr;
+
+ remain = RTE_MIN(desc->len, size);
+ addr = desc->addr;
+ do {
+ uint64_t len;
+ void *src;
+
+ len = remain;
+ src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
+ if (unlikely(src == NULL || len == 0))
+ return 0;
- rte_memcpy((uint8_t *)data, src, dlen);
- data += dlen;
+ rte_memcpy(dst, src, len);
+ remain -= len;
+ /* cast is needed for 32-bit architecture */
+ dst = RTE_PTR_ADD(dst, (size_t)len);
+ addr += len;
+ } while (unlikely(remain != 0));
- if (unlikely(dlen < to_copy)) {
- remain = to_copy - dlen;
- addr = desc->addr + dlen;
+ return RTE_MIN(desc->len, size);
+}
- while (remain) {
- len = remain;
- src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
- VHOST_ACCESS_RO);
- if (unlikely(!src || !len)) {
- VC_LOG_ERR("Failed to map descriptor");
- return -1;
- }
- rte_memcpy(data, src, len);
- addr += len;
- remain -= len;
- data += len;
- }
- }
+static __rte_always_inline int
+copy_data(void *data, struct vhost_crypto_data_req *vc_req,
+ struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
+ uint32_t size, uint32_t max_n_descs)
+{
+ struct vhost_crypto_desc *desc = *cur_desc;
+ uint32_t left = size;
- left -= to_copy;
+ do {
+ uint32_t copied;
- while (desc >= head && desc - head < (int)max_n_descs && left) {
- desc++;
- to_copy = RTE_MIN(desc->len, (uint32_t)left);
- dlen = to_copy;
- src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
- VHOST_ACCESS_RO);
- if (unlikely(!src || !dlen)) {
- VC_LOG_ERR("Failed to map descriptor");
+ copied = copy_data_from_desc(data, vc_req, desc, left);
+ if (copied == 0)
return -1;
- }
-
- rte_memcpy(data, src, dlen);
- data += dlen;
-
- if (unlikely(dlen < to_copy)) {
- remain = to_copy - dlen;
- addr = desc->addr + dlen;
-
- while (remain) {
- len = remain;
- src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
- VHOST_ACCESS_RO);
- if (unlikely(!src || !len)) {
- VC_LOG_ERR("Failed to map descriptor");
- return -1;
- }
-
- rte_memcpy(data, src, len);
- addr += len;
- remain -= len;
- data += len;
- }
- }
-
- left -= to_copy;
- }
+ left -= copied;
+ data = RTE_PTR_ADD(data, copied);
+ desc++;
+ } while (desc < head + max_n_descs && left != 0);
- if (unlikely(left > 0)) {
- VC_LOG_ERR("Incorrect virtio descriptor");
+ if (unlikely(left != 0))
return -1;
- }
- if (unlikely(desc - head == (int)max_n_descs))
+ if (unlikely(desc == head + max_n_descs))
*cur_desc = NULL;
else
*cur_desc = desc + 1;
@@ -852,6 +816,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* iv */
if (unlikely(copy_data(iv_data, vc_req, head, &desc,
cipher->para.iv_len, max_n_descs))) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -883,6 +848,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
vc_req, head, &desc, cipher->para.src_data_len,
max_n_descs) < 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1006,6 +972,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* iv */
if (unlikely(copy_data(iv_data, vc_req, head, &desc,
chain->para.iv_len, max_n_descs) < 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1037,6 +1004,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
vc_req, head, &desc, chain->para.src_data_len,
max_n_descs) < 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1121,6 +1089,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
chain->para.hash_result_len,
max_n_descs) < 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
--
2.23.0