> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:12 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 05/11] crypto/ccp: add support for CPU based
> authentication
When enabling this, I got the following error:
In file included from build/include/rte_mempool.h:79:0,
from build/include/rte_mbuf.h:65,
from build/include/rte_cryptodev_pmd.h:51,
from drivers/crypto/ccp/ccp_crypto.c:46:
drivers/crypto/ccp/ccp_crypto.c: In function 'cpu_crypto_auth':
build/include/rte_memcpy.h:367:2: error: array subscript is above array bounds [-Werror=array-bounds]
rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
Also, could you add a commit message in this and the other patches?
If the patch is simple enough, a commit message might not be necessary,
but for patches like this one, I think it is quite useful.
Also, I have an extra comment below.
Thanks,
Pablo
>
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
...
> a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c index
> 4d71ec1..1833929 100644
> --- a/drivers/crypto/ccp/ccp_crypto.c
> +++ b/drivers/crypto/ccp/ccp_crypto.c
...
> +static int cpu_crypto_auth(struct rte_crypto_op *op, struct ccp_session
> *sess,
> + EVP_MD_CTX *ctx)
> +{
> + uint8_t *src, *dst;
> + int srclen, status;
> + struct rte_mbuf *mbuf_src, *mbuf_dst;
> + const EVP_MD *algo = NULL;
> + EVP_PKEY *pkey;
> +
> + algo_select(sess->auth.algo, &algo);
> + pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess-
> >auth.key,
> + sess->auth.key_length);
> + mbuf_src = op->sym->m_src;
> + mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym-
> >m_src;
> + srclen = op->sym->auth.data.length;
> + src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
> + op->sym->auth.data.offset);
> +
> + if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
> + dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
> + sess->auth.digest_length);
There was a change in the previous release that removed any append to the source mbuf
to allocate memory for a temporary digest (like in this case). Instead, memory is reserved
in the queue pair structure. This way, you won't have to worry about not having enough space in the mbuf.
@@ -560,6 +560,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile PMD for AMD CCP crypto device
#
CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
#
# Compile PMD for Marvell Crypto device
@@ -54,6 +54,13 @@
#include <openssl/cmac.h> /*sub key apis*/
#include <openssl/evp.h> /*sub key apis*/
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
/* SHA initial context values */
static uint32_t ccp_sha1_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
SHA1_H4, SHA1_H3,
@@ -89,6 +96,7 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
SHA512_H3, SHA512_H2,
SHA512_H1, SHA512_H0,
};
+#endif
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
@@ -114,6 +122,7 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
return res;
}
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
/**partial hash using openssl*/
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
@@ -354,6 +363,7 @@ generate_cmac_subkeys(struct ccp_session *sess)
CCP_LOG_ERR("CMAC Init failed");
return -1;
}
+#endif
/**configure session*/
static int
@@ -452,7 +462,9 @@ ccp_configure_session_auth(struct ccp_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_auth_xform *auth_xform = NULL;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
size_t i;
+#endif
auth_xform = &xform->auth;
@@ -461,6 +473,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.op = CCP_AUTH_OP_GENERATE;
else
sess->auth.op = CCP_AUTH_OP_VERIFY;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1:
sess->auth.engine = CCP_ENGINE_SHA;
@@ -618,6 +631,77 @@ ccp_configure_session_auth(struct ccp_session *sess,
CCP_LOG_ERR("Unsupported hash algo");
return -1;
}
+#else
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -1;
+ }
+#endif
return 0;
}
@@ -860,12 +944,16 @@ ccp_compute_slot_count(struct ccp_session *session)
count = ccp_cipher_slot(session);
break;
case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
count = ccp_auth_slot(session);
+#endif
break;
case CCP_CMD_CIPHER_HASH:
case CCP_CMD_HASH_CIPHER:
count = ccp_cipher_slot(session);
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
count += ccp_auth_slot(session);
+#endif
break;
case CCP_CMD_COMBINED:
count = ccp_combined_mode_slot(session);
@@ -878,6 +966,123 @@ ccp_compute_slot_count(struct ccp_session *session)
return count;
}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static uint8_t
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ rte_memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct rte_crypto_op *op, struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+ sess->auth.digest_length);
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ rte_pktmbuf_trim(mbuf_src,
+ sess->auth.digest_length);
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+#endif
+
static void
ccp_perform_passthru(struct ccp_passthru *pst,
struct ccp_queue *cmd_q)
@@ -1831,11 +2036,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
int i, result = 0;
struct ccp_batch_info *b_info;
struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ EVP_MD_CTX *auth_ctx = NULL;
+#endif
if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
CCP_LOG_ERR("batch info allocation failed");
return 0;
}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+#endif
/* populate batch info necessary for dequeue */
b_info->op_idx = 0;
b_info->lsb_buf_idx = 0;
@@ -1856,16 +2072,29 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
result = ccp_crypto_cipher(op[i], cmd_q, b_info);
break;
case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(op[i], session, auth_ctx);
+#endif
break;
case CCP_CMD_CIPHER_HASH:
result = ccp_crypto_cipher(op[i], cmd_q, b_info);
if (result)
break;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#endif
break;
case CCP_CMD_HASH_CIPHER:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+ result = cpu_crypto_auth(op[i], session, auth_ctx);
+ if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+#endif
if (result)
break;
result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -1899,6 +2128,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ EVP_MD_CTX_destroy(auth_ctx);
+#endif
return i;
}
@@ -1974,6 +2206,15 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
int i, min_ops;
struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+#endif
min_ops = RTE_MIN(nb_ops, b_info->opcnt);
for (i = 0; i < min_ops; i++) {
@@ -1986,8 +2227,24 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
break;
case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ ccp_auth_dq_prepare(op_d[i]);
+#endif
+ break;
case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ cpu_crypto_auth(op_d[i], session, auth_ctx);
+#else
+ ccp_auth_dq_prepare(op_d[i]);
+#endif
+ break;
case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+ ccp_auth_dq_prepare(op_d[i]);
+#endif
+ break;
case CCP_CMD_COMBINED:
ccp_auth_dq_prepare(op_d[i]);
break;
@@ -1996,6 +2253,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
}
}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ EVP_MD_CTX_destroy(auth_ctx);
+#endif
b_info->opcnt -= min_ops;
return min_ops;
}
@@ -2015,6 +2275,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
} else if (rte_ring_dequeue(qp->processed_pkts,
(void **)&b_info))
return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+#endif
cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
CMD_Q_HEAD_LO_BASE);
@@ -68,6 +68,11 @@
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+#endif
+
/**SHA */
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
@@ -236,6 +241,9 @@ enum ccp_hash_algo {
CCP_AUTH_ALGO_SHA512_HMAC,
CCP_AUTH_ALGO_AES_CMAC,
CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ CCP_AUTH_ALGO_MD5_HMAC,
+#endif
};
/**
@@ -39,6 +39,29 @@
#include <ccp_crypto.h>
static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+#endif
{ /* SHA1 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -88,6 +88,10 @@ struct ccp_batch_info {
phys_addr_t lsb_buf_phys;
/**< LSB intermediate buf for passthru */
int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ uint16_t auth_ctr;
+ /**< auth only ops batch */
+#endif
} __rte_cache_aligned;
/**< CCP crypto queue pair */