@@ -106,6 +106,14 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
return -EIO;
}
+ /* Configure input queue instruction size. */
+ if (otx_ep->conf->iq.instr_type == OTX_EP_32BYTE_INSTR)
+ reg_val &= ~(CNXK_EP_R_IN_CTL_IS_64B);
+ else
+ reg_val |= CNXK_EP_R_IN_CTL_IS_64B;
+ oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
+ iq->desc_size = otx_ep->conf->iq.instr_type;
+
/* Write the start of the input queue's ring and its size */
oct_ep_write64(iq->base_addr_dma, otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_BADDR(iq_no));
oct_ep_write64(iq->nb_desc, otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_RSIZE(iq_no));
@@ -354,7 +362,7 @@ static const struct otx_ep_config default_cnxk_ep_conf = {
/* IQ attributes */
.iq = {
.max_iqs = OTX_EP_CFG_IO_QUEUES,
- .instr_type = OTX_EP_64BYTE_INSTR,
+ .instr_type = OTX_EP_32BYTE_INSTR,
.pending_list_size = (OTX_EP_MAX_IQ_DESCRIPTORS *
OTX_EP_CFG_IO_QUEUES),
},
@@ -10,3 +10,13 @@ sources = files(
'cnxk_ep_vf.c',
'otx_ep_mbox.c',
)
+
+if (toolchain == 'gcc' and cc.version().version_compare('>=11.0.0'))
+ error_cflags += ['-Wno-array-bounds']
+endif
+
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
@@ -256,6 +256,14 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
return -EIO;
}
+ /* Configure input queue instruction size. */
+ if (otx_ep->conf->iq.instr_type == OTX_EP_32BYTE_INSTR)
+ reg_val &= ~(SDP_VF_R_IN_CTL_IS_64B);
+ else
+ reg_val |= SDP_VF_R_IN_CTL_IS_64B;
+ oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
+ iq->desc_size = otx_ep->conf->iq.instr_type;
+
/* Write the start of the input queue's ring and its size */
oct_ep_write64(iq->base_addr_dma, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_BADDR(iq_no));
oct_ep_write64(iq->nb_desc, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(iq_no));
@@ -500,7 +508,7 @@ static const struct otx_ep_config default_otx2_ep_conf = {
/* IQ attributes */
.iq = {
.max_iqs = OTX_EP_CFG_IO_QUEUES,
- .instr_type = OTX_EP_64BYTE_INSTR,
+ .instr_type = OTX_EP_32BYTE_INSTR,
.pending_list_size = (OTX_EP_MAX_IQ_DESCRIPTORS *
OTX_EP_CFG_IO_QUEUES),
},
@@ -11,6 +11,7 @@
#define OTX_EP_MAX_RINGS_PER_VF (8)
#define OTX_EP_CFG_IO_QUEUES OTX_EP_MAX_RINGS_PER_VF
+#define OTX_EP_32BYTE_INSTR (32)
#define OTX_EP_64BYTE_INSTR (64)
/*
* Backpressure for SDP is configured on Octeon, and the minimum queue sizes
@@ -215,6 +216,9 @@ struct otx_ep_instr_queue {
/* Number of descriptors in this ring. */
uint32_t nb_desc;
+ /* Size of the descriptor. */
+ uint8_t desc_size;
+
/* Input ring index, where the driver should write the next packet */
uint32_t host_write_index;
@@ -484,7 +484,7 @@ otx_ep_ring_doorbell(struct otx_ep_device *otx_ep __rte_unused,
static inline int
post_iqcmd(struct otx_ep_instr_queue *iq, uint8_t *iqcmd)
{
- uint8_t *iqptr, cmdsize;
+ uint8_t *iqptr;
/* This ensures that the read index does not wrap around to
* the same position if queue gets full before OCTEON 9 could
@@ -494,10 +494,8 @@ post_iqcmd(struct otx_ep_instr_queue *iq, uint8_t *iqcmd)
return OTX_EP_IQ_SEND_FAILED;
/* Copy cmd into iq */
- cmdsize = 64;
- iqptr = iq->base_addr + (iq->host_write_index << 6);
-
- rte_memcpy(iqptr, iqcmd, cmdsize);
+ iqptr = iq->base_addr + (iq->host_write_index * iq->desc_size);
+ rte_memcpy(iqptr, iqcmd, iq->desc_size);
/* Increment the host write index */
iq->host_write_index =
@@ -120,6 +120,14 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
return -EIO;
}
+ /* Configure input queue instruction size. */
+ if (otx_ep->conf->iq.instr_type == OTX_EP_32BYTE_INSTR)
+ reg_val &= ~(OTX_EP_R_IN_CTL_IS_64B);
+ else
+ reg_val |= OTX_EP_R_IN_CTL_IS_64B;
+ oct_ep_write64(reg_val, otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(iq_no));
+ iq->desc_size = otx_ep->conf->iq.instr_type;
+
/* Write the start of the input queue's ring and its size */
otx_ep_write64(iq->base_addr_dma, otx_ep->hw_addr,
OTX_EP_R_IN_INSTR_BADDR(iq_no));