diff mbox series

[RFC] bbdev: add raw operation support for additional offloads

Message ID 20210413050006.6579-1-hemant.agrawal@nxp.com (mailing list archive)
State RFC
Delegated to: akhil goyal
Headers show
Series [RFC] bbdev: add raw operation support for additional offloads | expand

Checks

Context Check Description
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/checkpatch warning coding style issues

Commit Message

Hemant Agrawal April 13, 2021, 5 a.m. UTC
The existing bbdev APIs are limited to the lookaside FEC offload only.

The modem/FPGA can do much more than just the FEC offload. This patch
extends the operation to user-defined raw parameters, where they can
offload more than just the FEC offload processing. e.g. some of the
devices are capable of offloading large part of ORAN High-phy processing
as well.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 lib/librte_bbdev/rte_bbdev.c    |  1 +
 lib/librte_bbdev/rte_bbdev.h    | 64 +++++++++++++++++++++
 lib/librte_bbdev/rte_bbdev_op.h | 98 +++++++++++++++++++++++----------
 3 files changed, 134 insertions(+), 29 deletions(-)

Comments

Chautru, Nicolas April 14, 2021, 12:44 a.m. UTC | #1
Initial question from a quick look, If you need a raw driver, cannot this be done by existing rawdev driver? Why should that fall under bbdev then?

> -----Original Message-----
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> Sent: Monday, April 12, 2021 10:00 PM
> To: dev@dpdk.org; gakhil@marvell.com; Chautru, Nicolas
> <nicolas.chautru@intel.com>
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>; Nipun Gupta
> <nipun.gupta@nxp.com>
> Subject: [RFC][PATCH] bbdev: add raw operation support for additional
> offloads
> 
> The existing bbdev APIs are limited to the lookaside FEC offload only.
> 
> The modem/FPGA can do much more than just the FEC offload. This patch
> extends the operation to user-defined raw parameters, where they can
> offload more than just the FEC offload processing. e.g. some of the devices
> are capable of offloading large part of ORAN High-phy processing as well.
> 
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  lib/librte_bbdev/rte_bbdev.c    |  1 +
>  lib/librte_bbdev/rte_bbdev.h    | 64 +++++++++++++++++++++
>  lib/librte_bbdev/rte_bbdev_op.h | 98 +++++++++++++++++++++++----------
>  3 files changed, 134 insertions(+), 29 deletions(-)
> 
> diff --git a/lib/librte_bbdev/rte_bbdev.c b/lib/librte_bbdev/rte_bbdev.c
> index 5ba891c232..9679048ce7 100644
> --- a/lib/librte_bbdev/rte_bbdev.c
> +++ b/lib/librte_bbdev/rte_bbdev.c
> @@ -1126,6 +1126,7 @@ rte_bbdev_op_type_str(enum
> rte_bbdev_op_type op_type)
>  		"RTE_BBDEV_OP_TURBO_ENC",
>  		"RTE_BBDEV_OP_LDPC_DEC",
>  		"RTE_BBDEV_OP_LDPC_ENC",
> +		"RTE_BBDEV_OP_RAW",
>  	};
> 
>  	if (op_type < RTE_BBDEV_OP_TYPE_COUNT) diff --git
> a/lib/librte_bbdev/rte_bbdev.h b/lib/librte_bbdev/rte_bbdev.h index
> 7017124414..fe028524c8 100644
> --- a/lib/librte_bbdev/rte_bbdev.h
> +++ b/lib/librte_bbdev/rte_bbdev.h
> @@ -1,5 +1,6 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
>   * Copyright(c) 2017 Intel Corporation
> + * Copyright 2021 NXP
>   */
> 
>  #ifndef _RTE_BBDEV_H_
> @@ -387,6 +388,15 @@ struct rte_bbdev_queue_data {
>  	bool started;  /**< Queue state */
>  };
> 
> +/** @internal Enqueue raw operations for processing on queue of a
> +device. */ typedef uint16_t (*rte_bbdev_enqueue_raw_op_t)(
> +		struct rte_bbdev_queue_data *q_data,
> +		struct rte_bbdev_raw_op *op);
> +
> +/** @internal Dequeue raw operations for processing on queue of a
> +device. */ typedef struct rte_bbdev_raw_op
> *(*rte_bbdev_dequeue_raw_op_t)(
> +		struct rte_bbdev_queue_data *q_data);
> +
>  /** @internal Enqueue encode operations for processing on queue of a
> device. */  typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
>  		struct rte_bbdev_queue_data *q_data,
> @@ -441,6 +451,10 @@ TAILQ_HEAD(rte_bbdev_cb_list,
> rte_bbdev_callback);
>   * these fields, but should only write to the *_ops fields.
>   */
>  struct __rte_cache_aligned rte_bbdev {
> +	/** Enqueue raw op function */
> +	rte_bbdev_enqueue_raw_op_t enqueue_raw_op;
> +	/** Dequeue raw op function */
> +	rte_bbdev_dequeue_raw_op_t dequeue_raw_op;
>  	/** Enqueue encode function */
>  	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
>  	/** Enqueue decode function */
> @@ -469,6 +483,33 @@ struct __rte_cache_aligned rte_bbdev {
>  /** @internal array of all devices */
>  extern struct rte_bbdev rte_bbdev_devices[];
> 
> +/**
> + * Enqueue a RAW operation to a queue of the device.
> + * If confirmation is required then the memory for the "op" structure
> +should
> + * be allocated from heap/mempool and should be freed only after
> confirmation.
> + * Otherwise, it shall be on stack or if on heap, should be freed after
> +enqueue
> + * operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param queue_id
> + *   The index of the queue.
> + * @param op
> + *   Pointer containing operation to be enqueued.
> + *
> + * @return
> + *    Status of the enqueue operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_bbdev_enqueue_raw_op(uint16_t dev_id, uint16_t queue_id,
> +		struct rte_bbdev_raw_op *op)
> +{
> +	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
> +	struct rte_bbdev_queue_data *q_data = &dev->data-
> >queues[queue_id];
> +	return dev->enqueue_raw_op(q_data, op); }
> +
>  /**
>   * Enqueue a burst of processed encode operations to a queue of the
> device.
>   * This functions only enqueues as many operations as currently possible
> and @@ -593,6 +634,29 @@ rte_bbdev_enqueue_ldpc_dec_ops(uint16_t
> dev_id, uint16_t queue_id,
>  	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);  }
> 
> +/**
> + * Dequeue a raw operation.
> + * For HOST->MODEM queues, this would provide RAW op which had
> + * "conf_enable" configured at queue initialization.
> + * For MODEM->HOST queues, this would provide RAW op which are sent
> from MODEM.
> + * "op" memory would be internally allocated
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param queue_id
> + *   The index of the queue.
> + *
> + * @return
> + *   Pointer containing dequeued operation.
> + */
> +__rte_experimental
> +static inline struct rte_bbdev_raw_op *
> +rte_bbdev_dequeue_raw_op(uint16_t dev_id, uint16_t queue_id) {
> +	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
> +	struct rte_bbdev_queue_data *q_data = &dev->data-
> >queues[queue_id];
> +	return dev->dequeue_raw_op(q_data);
> +}
> 
>  /**
>   * Dequeue a burst of processed encode operations from a queue of the
> device.
> diff --git a/lib/librte_bbdev/rte_bbdev_op.h
> b/lib/librte_bbdev/rte_bbdev_op.h index f726d7302e..fa993c6222 100644
> --- a/lib/librte_bbdev/rte_bbdev_op.h
> +++ b/lib/librte_bbdev/rte_bbdev_op.h
> @@ -1,5 +1,6 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
>   * Copyright(c) 2017 Intel Corporation
> + * Copyright 2021 NXP
>   */
> 
>  #ifndef _RTE_BBDEV_OP_H_
> @@ -211,36 +212,57 @@ enum rte_bbdev_op_ldpcenc_flag_bitmasks {
> 
>  /** Data input and output buffer for BBDEV operations */  struct
> rte_bbdev_op_data {
> -	/** The mbuf data structure representing the data for BBDEV
> operation.
> -	 *
> -	 * This mbuf pointer can point to one Code Block (CB) data buffer or
> -	 * multiple CBs contiguously located next to each other.
> -	 * A Transport Block (TB) represents a whole piece of data that is
> -	 * divided into one or more CBs. Maximum number of CBs can be
> contained
> -	 * in one TB is defined by
> RTE_BBDEV_(TURBO/LDPC)_MAX_CODE_BLOCKS.
> -	 *
> -	 * An mbuf data structure cannot represent more than one TB. The
> -	 * smallest piece of data that can be contained in one mbuf is one
> CB.
> -	 * An mbuf can include one contiguous CB, subset of contiguous CBs
> that
> -	 * are belonging to one TB, or all contiguous CBs that are belonging
> to
> -	 * one TB.
> -	 *
> -	 * If a BBDEV PMD supports the extended capability "Scatter-Gather",
> -	 * then it is capable of collecting (gathering) non-contiguous
> -	 * (scattered) data from multiple locations in the memory.
> -	 * This capability is reported by the capability flags:
> -	 * - RTE_BBDEV_(TURBO/LDPC)_ENC_SCATTER_GATHER and
> -	 * - RTE_BBDEV_(TURBO/LDPC)_DEC_SCATTER_GATHER.
> -	 * Only if a BBDEV PMD supports this feature, chained mbuf data
> -	 * structures are accepted. A chained mbuf can represent one
> -	 * non-contiguous CB or multiple non-contiguous CBs.
> -	 * If BBDEV PMD does not support this feature, it will assume
> inbound
> -	 * mbuf data contains one segment.
> -	 *
> -	 * The output mbuf data though is always one segment, even if the
> input
> -	 * was a chained mbuf.
> +	/** If set, this indicates that the memory pointer provided in
> +	 * data or mem is a memory pointer which is a contiguous memory
> +	 * having the data (and is not an mbuf)
>  	 */
> -	struct rte_mbuf *data;
> +	uint32_t is_direct_mem;
> +	union {
> +		/** The mbuf data structure representing the data for BBDEV
> operation.
> +		 *
> +		 * This mbuf pointer can point to one Code Block (CB) data
> buffer or
> +		 * multiple CBs contiguously located next to each other.
> +		 * A Transport Block (TB) represents a whole piece of data
> that is
> +		 * divided into one or more CBs. Maximum number of CBs
> can be contained
> +		 * in one TB is defined by
> RTE_BBDEV_(TURBO/LDPC)_MAX_CODE_BLOCKS.
> +		 *
> +		 * An mbuf data structure cannot represent more than one
> TB. The
> +		 * smallest piece of data that can be contained in one mbuf is
> one CB.
> +		 * An mbuf can include one contiguous CB, subset of
> contiguous CBs that
> +		 * are belonging to one TB, or all contiguous CBs that are
> belonging to
> +		 * one TB.
> +		 *
> +		 * If a BBDEV PMD supports the extended capability "Scatter-
> Gather",
> +		 * then it is capable of collecting (gathering) non-contiguous
> +		 * (scattered) data from multiple locations in the memory.
> +		 * This capability is reported by the capability flags:
> +		 * - RTE_BBDEV_(TURBO/LDPC)_ENC_SCATTER_GATHER and
> +		 * - RTE_BBDEV_(TURBO/LDPC)_DEC_SCATTER_GATHER.
> +		 * Only if a BBDEV PMD supports this feature, chained mbuf
> data
> +		 * structures are accepted. A chained mbuf can represent
> one
> +		 * non-contiguous CB or multiple non-contiguous CBs.
> +		 * If BBDEV PMD does not support this feature, it will assume
> inbound
> +		 * mbuf data contains one segment.
> +		 *
> +		 * The output mbuf data though is always one segment, even
> if the input
> +		 * was a chained mbuf.
> +		 */
> +		struct rte_mbuf *data;
> +
> +		/** bbuf representing the data for BBDEV operation.
> +		 * This is a non scatter-gather buffer which uses length and
> offset
> +		 * parameters from rte_bbdev_op_data structure to evaluate
> the
> +		 * length of the buffer and offset of the starting data
> respectively.
> +		 */
> +		void *bdata;
> +
> +		/** memory pointer representing the data for BBDEV
> operation.
> +		 * This is a contiguous memory which uses length and offset
> +		 * parameters from rte_bbdev_op_data structure to evaluate
> the
> +		 * length of the buffer and offset of the starting data
> respectively.
> +		 */
> +		void *mem;
> +	};
>  	/** The starting point of the BBDEV (encode/decode) operation,
>  	 * in bytes.
>  	 *
> @@ -738,6 +760,7 @@ enum rte_bbdev_op_type {
>  	RTE_BBDEV_OP_TURBO_ENC,  /**< Turbo encode */
>  	RTE_BBDEV_OP_LDPC_DEC,  /**< LDPC decode */
>  	RTE_BBDEV_OP_LDPC_ENC,  /**< LDPC encode */
> +	RTE_BBDEV_OP_RAW, /**< RAW operation */
>  	RTE_BBDEV_OP_TYPE_COUNT,  /**< Count of different op types */
> };
> 
> @@ -749,6 +772,23 @@ enum {
>  	RTE_BBDEV_SYNDROME_ERROR
>  };
> 
> +/** Structure specifying a single raw operation */ struct
> +rte_bbdev_raw_op {
> +	/** RAW operation flags. BBDEV_RAW_OP_IN_VALID /
> BBDEV_RAW_OP_OUT_VALID
> +	 */
> +	uint32_t raw_op_flags;
> +	/** Status of the operation */
> +	uint32_t status;
> +	/** Opaque pointer for user data in case of confirmation. Invalid for
> +	 *  dequeue operation for MODEM -> HOST communication.
> +	 */
> +	void *opaque_data;
> +	/** Input data */
> +	struct rte_bbdev_op_data input;
> +	/** Output data */
> +	struct rte_bbdev_op_data output;
> +};
> +
>  /** Structure specifying a single encode operation */  struct
> rte_bbdev_enc_op {
>  	/** Status of operation that was performed */
> --
> 2.17.1
diff mbox series

Patch

diff --git a/lib/librte_bbdev/rte_bbdev.c b/lib/librte_bbdev/rte_bbdev.c
index 5ba891c232..9679048ce7 100644
--- a/lib/librte_bbdev/rte_bbdev.c
+++ b/lib/librte_bbdev/rte_bbdev.c
@@ -1126,6 +1126,7 @@  rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
 		"RTE_BBDEV_OP_TURBO_ENC",
 		"RTE_BBDEV_OP_LDPC_DEC",
 		"RTE_BBDEV_OP_LDPC_ENC",
+		"RTE_BBDEV_OP_RAW",
 	};
 
 	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
diff --git a/lib/librte_bbdev/rte_bbdev.h b/lib/librte_bbdev/rte_bbdev.h
index 7017124414..fe028524c8 100644
--- a/lib/librte_bbdev/rte_bbdev.h
+++ b/lib/librte_bbdev/rte_bbdev.h
@@ -1,5 +1,6 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2017 Intel Corporation
+ * Copyright 2021 NXP
  */
 
 #ifndef _RTE_BBDEV_H_
@@ -387,6 +388,15 @@  struct rte_bbdev_queue_data {
 	bool started;  /**< Queue state */
 };
 
+/** @internal Enqueue raw operations for processing on queue of a device. */
+typedef uint16_t (*rte_bbdev_enqueue_raw_op_t)(
+		struct rte_bbdev_queue_data *q_data,
+		struct rte_bbdev_raw_op *op);
+
+/** @internal Dequeue raw operations for processing on queue of a device. */
+typedef struct rte_bbdev_raw_op *(*rte_bbdev_dequeue_raw_op_t)(
+		struct rte_bbdev_queue_data *q_data);
+
 /** @internal Enqueue encode operations for processing on queue of a device. */
 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
 		struct rte_bbdev_queue_data *q_data,
@@ -441,6 +451,10 @@  TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
  * these fields, but should only write to the *_ops fields.
  */
 struct __rte_cache_aligned rte_bbdev {
+	/** Enqueue raw op function */
+	rte_bbdev_enqueue_raw_op_t enqueue_raw_op;
+	/** Dequeue raw op function */
+	rte_bbdev_dequeue_raw_op_t dequeue_raw_op;
 	/** Enqueue encode function */
 	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
 	/** Enqueue decode function */
@@ -469,6 +483,33 @@  struct __rte_cache_aligned rte_bbdev {
 /** @internal array of all devices */
 extern struct rte_bbdev rte_bbdev_devices[];
 
+/**
+ * Enqueue a RAW operation to a queue of the device.
+ * If confirmation is required then the memory for the "op" structure should
+ * be allocated from heap/mempool and should be freed only after confirmation.
+ * Otherwise, it shall be on stack or if on heap, should be freed after enqueue
+ * operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ * @param op
+ *   Pointer containing operation to be enqueued.
+ *
+ * @return
+ *    Status of the enqueue operation.
+ */
+__rte_experimental
+static inline uint16_t
+rte_bbdev_enqueue_raw_op(uint16_t dev_id, uint16_t queue_id,
+		struct rte_bbdev_raw_op *op)
+{
+	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+	return dev->enqueue_raw_op(q_data, op);
+}
+
 /**
  * Enqueue a burst of processed encode operations to a queue of the device.
  * This functions only enqueues as many operations as currently possible and
@@ -593,6 +634,29 @@  rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
 	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
 }
 
+/**
+ * Dequeue a raw operation.
+ * For HOST->MODEM queues, this would provide RAW op which had
+ * "conf_enable" configured at queue initialization.
+ * For MODEM->HOST queues, this would provide RAW op which are sent from MODEM.
+ * "op" memory would be internally allocated
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ *
+ * @return
+ *   Pointer containing dequeued operation.
+ */
+__rte_experimental
+static inline struct rte_bbdev_raw_op *
+rte_bbdev_dequeue_raw_op(uint16_t dev_id, uint16_t queue_id)
+{
+	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+	return dev->dequeue_raw_op(q_data);
+}
 
 /**
  * Dequeue a burst of processed encode operations from a queue of the device.
diff --git a/lib/librte_bbdev/rte_bbdev_op.h b/lib/librte_bbdev/rte_bbdev_op.h
index f726d7302e..fa993c6222 100644
--- a/lib/librte_bbdev/rte_bbdev_op.h
+++ b/lib/librte_bbdev/rte_bbdev_op.h
@@ -1,5 +1,6 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2017 Intel Corporation
+ * Copyright 2021 NXP
  */
 
 #ifndef _RTE_BBDEV_OP_H_
@@ -211,36 +212,57 @@  enum rte_bbdev_op_ldpcenc_flag_bitmasks {
 
 /** Data input and output buffer for BBDEV operations */
 struct rte_bbdev_op_data {
-	/** The mbuf data structure representing the data for BBDEV operation.
-	 *
-	 * This mbuf pointer can point to one Code Block (CB) data buffer or
-	 * multiple CBs contiguously located next to each other.
-	 * A Transport Block (TB) represents a whole piece of data that is
-	 * divided into one or more CBs. Maximum number of CBs can be contained
-	 * in one TB is defined by RTE_BBDEV_(TURBO/LDPC)_MAX_CODE_BLOCKS.
-	 *
-	 * An mbuf data structure cannot represent more than one TB. The
-	 * smallest piece of data that can be contained in one mbuf is one CB.
-	 * An mbuf can include one contiguous CB, subset of contiguous CBs that
-	 * are belonging to one TB, or all contiguous CBs that are belonging to
-	 * one TB.
-	 *
-	 * If a BBDEV PMD supports the extended capability "Scatter-Gather",
-	 * then it is capable of collecting (gathering) non-contiguous
-	 * (scattered) data from multiple locations in the memory.
-	 * This capability is reported by the capability flags:
-	 * - RTE_BBDEV_(TURBO/LDPC)_ENC_SCATTER_GATHER and
-	 * - RTE_BBDEV_(TURBO/LDPC)_DEC_SCATTER_GATHER.
-	 * Only if a BBDEV PMD supports this feature, chained mbuf data
-	 * structures are accepted. A chained mbuf can represent one
-	 * non-contiguous CB or multiple non-contiguous CBs.
-	 * If BBDEV PMD does not support this feature, it will assume inbound
-	 * mbuf data contains one segment.
-	 *
-	 * The output mbuf data though is always one segment, even if the input
-	 * was a chained mbuf.
+	/** If set, this indicates that the memory pointer provided in
+	 * data or mem is a memory pointer which is a contiguous memory
+	 * having the data (and is not an mbuf)
 	 */
-	struct rte_mbuf *data;
+	uint32_t is_direct_mem;
+	union {
+		/** The mbuf data structure representing the data for BBDEV operation.
+		 *
+		 * This mbuf pointer can point to one Code Block (CB) data buffer or
+		 * multiple CBs contiguously located next to each other.
+		 * A Transport Block (TB) represents a whole piece of data that is
+		 * divided into one or more CBs. Maximum number of CBs can be contained
+		 * in one TB is defined by RTE_BBDEV_(TURBO/LDPC)_MAX_CODE_BLOCKS.
+		 *
+		 * An mbuf data structure cannot represent more than one TB. The
+		 * smallest piece of data that can be contained in one mbuf is one CB.
+		 * An mbuf can include one contiguous CB, subset of contiguous CBs that
+		 * are belonging to one TB, or all contiguous CBs that are belonging to
+		 * one TB.
+		 *
+		 * If a BBDEV PMD supports the extended capability "Scatter-Gather",
+		 * then it is capable of collecting (gathering) non-contiguous
+		 * (scattered) data from multiple locations in the memory.
+		 * This capability is reported by the capability flags:
+		 * - RTE_BBDEV_(TURBO/LDPC)_ENC_SCATTER_GATHER and
+		 * - RTE_BBDEV_(TURBO/LDPC)_DEC_SCATTER_GATHER.
+		 * Only if a BBDEV PMD supports this feature, chained mbuf data
+		 * structures are accepted. A chained mbuf can represent one
+		 * non-contiguous CB or multiple non-contiguous CBs.
+		 * If BBDEV PMD does not support this feature, it will assume inbound
+		 * mbuf data contains one segment.
+		 *
+		 * The output mbuf data though is always one segment, even if the input
+		 * was a chained mbuf.
+		 */
+		struct rte_mbuf *data;
+
+		/** bbuf representing the data for BBDEV operation.
+		 * This is a non scatter-gather buffer which uses length and offset
+		 * parameters from rte_bbdev_op_data structure to evaluate the
+		 * length of the buffer and offset of the starting data respectively.
+		 */
+		void *bdata;
+
+		/** memory pointer representing the data for BBDEV operation.
+		 * This is a contiguous memory which uses length and offset
+		 * parameters from rte_bbdev_op_data structure to evaluate the
+		 * length of the buffer and offset of the starting data respectively.
+		 */
+		void *mem;
+	};
 	/** The starting point of the BBDEV (encode/decode) operation,
 	 * in bytes.
 	 *
@@ -738,6 +760,7 @@  enum rte_bbdev_op_type {
 	RTE_BBDEV_OP_TURBO_ENC,  /**< Turbo encode */
 	RTE_BBDEV_OP_LDPC_DEC,  /**< LDPC decode */
 	RTE_BBDEV_OP_LDPC_ENC,  /**< LDPC encode */
+	RTE_BBDEV_OP_RAW, /**< RAW operation */
 	RTE_BBDEV_OP_TYPE_COUNT,  /**< Count of different op types */
 };
 
@@ -749,6 +772,23 @@  enum {
 	RTE_BBDEV_SYNDROME_ERROR
 };
 
+/** Structure specifying a single raw operation */
+struct rte_bbdev_raw_op {
+	/** RAW operation flags. BBDEV_RAW_OP_IN_VALID / BBDEV_RAW_OP_OUT_VALID
+	 */
+	uint32_t raw_op_flags;
+	/** Status of the operation */
+	uint32_t status;
+	/** Opaque pointer for user data in case of confirmation. Invalid for
+	 *  dequeue operation for MODEM -> HOST communication.
+	 */
+	void *opaque_data;
+	/** Input data */
+	struct rte_bbdev_op_data input;
+	/** Output data */
+	struct rte_bbdev_op_data output;
+};
+
 /** Structure specifying a single encode operation */
 struct rte_bbdev_enc_op {
 	/** Status of operation that was performed */