@@ -450,6 +450,218 @@ int32_t cpt_fc_get_ctx_len(void)
return (uint32_t)i;
}
+static int
+cpt_digest_gen_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ digest_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ cpt_request_info_t *req;
+ uint32_t size, i;
+ int32_t m_size;
+ uint16_t data_len, mac_len, key_len;
+ auth_type_t hash_type;
+ buf_ptr_t *meta_p;
+ struct cpt_ctx *ctx;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr, *m_vaddr;
+ uint64_t c_dma, m_dma;
+ opcode_info_t opcode;
+
+ if (!params || !params->ctx_buf.vaddr)
+ return ERR_BAD_INPUT_ARG;
+
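+ /* d_offs is unused for digest generation */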
+ (void)d_offs;
+ ctx = params->ctx_buf.vaddr;
+ meta_p = &params->meta_buf;
+
+ if (!meta_p->vaddr || !meta_p->dma_addr)
+ return ERR_BAD_INPUT_ARG;
+
+ if (meta_p->size < sizeof(cpt_request_info_t))
+ return ERR_BAD_INPUT_ARG;
+
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+ m_size = meta_p->size;
+
+ /*
+ * Reserve the space immediately after the app data so that the
+ * completion code and alternate completion code fall in the same
+ * cache line as the app data.
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
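+ /* Carve out a 16-byte-aligned slot for the cpt_res_s_t completion */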
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
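+ /* Carve the request structure itself from the meta buffer */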
+ req = m_vaddr;
+
+ size = sizeof(cpt_request_info_t);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Initialising ctrl and opcode fields */
+
+ req->dma_mode = CTRL_DMA_MODE_SGIO;
+ req->se_req = SE_CORE_REQ;
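+ /* SG I/O mode: input and output are described by the gather and
+ * scatter lists built below
+ */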
+
+ hash_type = ctx->hash_type;
+ mac_len = ctx->mac_len;
+ key_len = ctx->auth_key_len;
+ data_len = AUTH_DLEN(d_lens);
+
+ /* GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param2 = htobe16(((uint16_t)hash_type << 8));
+ if (ctx->hmac) {
+ opcode.s.major = MAJOR_OP_HMAC | DMA_MODE;
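+ /* For HMAC the DPTR carries the key (padded to 8B) ahead of the data */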
+ vq_cmd_w0.s.param1 = htobe16(key_len);
+ vq_cmd_w0.s.dlen = htobe16((data_len + ROUNDUP8(key_len)));
+ } else {
+ opcode.s.major = MAJOR_OP_HASH | DMA_MODE;
+ vq_cmd_w0.s.param1 = 0;
+ vq_cmd_w0.s.dlen = htobe16(data_len);
+ }
+
+ opcode.s.minor = 0;
+
+ vq_cmd_w0.s.opcode = htobe16(opcode.flags);
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
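+ /*
+ * SG list header: the first two 16-bit words are cleared; words
+ * [2] and [3] receive the gather and scatter component counts below
+ */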
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO: Add an error check that the remaining meta space is sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input gather list
+ */
+
+ i = 0;
+
+ if (ctx->hmac) {
+ uint64_t k_dma = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, auth_key);
+ /* Key */
+ i = fill_sg_comp(gather_comp, i, k_dma,
+ ctx->auth_key, ROUNDUP8(key_len));
+ }
+
+ /* input data */
+ size = data_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
+ 0, &size, NULL, 0);
+ if (size) {
+ PMD_TX_LOG(DEBUG, "Insufficient src IOV size, short by"
+ " %dB\n", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ } else {
+ /*
+ * Looks like we need to support a zero-data
+ * gather pointer in the hash & hmac case
+ */
+ i++;
+ }
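+ /* Gather count goes in the third 16-bit word of the SG header;
+ * the size math packs four components per sg_comp_t
+ */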
+ ((uint16_t *)in_buffer)[2] = htobe16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter list
+ */
+
+ i = 0;
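+ /* Scatter list immediately follows the gather list */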
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags & VALID_MAC_BUF) {
+ if (params->mac_buf.size < mac_len)
+ return ERR_BAD_INPUT_ARG;
+
+ size = mac_len;
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ &params->mac_buf, &size);
+ } else {
+ size = mac_len;
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->src_iov, data_len,
+ &size, NULL, 0);
+ if (size) {
+ PMD_TX_LOG(DEBUG, "Insufficient dst IOV size, short by"
+ " %dB\n", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = htobe16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* DPTR len for SG mode; this overrides the dlen set above */
+ vq_cmd_w0.s.dlen = htobe16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
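+ /* RPTR is the DMA address of the alternate completion word */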
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = htobe64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)c_vaddr;
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+#ifdef CPTVF_STRICT_PARAM_CHECK
+ if (m_size < 0)
+ abort();
+#endif
+ *prep_req = req;
+ return 0;
+}
+
static inline int __attribute__((always_inline))
cpt_enc_hmac_prep(uint32_t flags,
uint64_t d_offs,
@@ -2531,6 +2743,9 @@ static inline int __attribute__((always_inline))
} else if (fc_type == KASUMI) {
ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
fc_params, op, &prep_req);
+ } else if (fc_type == HASH_HMAC) {
+ ret = cpt_digest_gen_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
} else {
ret = ERR_EIO;
}