@@ -514,6 +514,91 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
}
}
+/* Mbox "up" handler for MCS_INTR_NOTIFY sent by the AF.
+ *
+ * Translates the hardware interrupt cause in info->intr_mask into a
+ * roc_mcs_event_desc and dispatches it to the callbacks registered via
+ * roc_mcs_event_cb_register() (through mcs_event_cb_process()).
+ * The message is always acked with rc = 0, even when the mcs_id is
+ * unknown or the cause is unhandled.
+ *
+ * NOTE(review): the switch matches intr_mask as a whole, so this
+ * assumes the AF sets exactly one interrupt bit per notification; a
+ * mask with several bits set falls into 'default' and is dropped --
+ * confirm against the AF mbox contract.
+ */
+static int
+mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, struct msg_rsp *rsp)
+{
+ struct roc_mcs_event_desc desc = {0};
+ struct roc_mcs *mcs;
+
+ plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func),
+ dev_get_vf(dev->pf_func), info->hdr.id, mbox_id2name(info->hdr.id),
+ dev_get_pf(info->hdr.pcifunc), dev_get_vf(info->hdr.pcifunc));
+
+ /* Resolve the roc_mcs instance this notification targets. */
+ mcs = roc_idev_mcs_get(info->mcs_id);
+ if (!mcs)
+ goto exit;
+
+ if (info->intr_mask) {
+ switch (info->intr_mask) {
+ /* Rx SecTag validation-check failures: one event type with a
+ * per-check subtype.
+ */
+ case MCS_CPM_RX_SECTAG_V_EQ1_INT:
+ desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+ desc.subtype = ROC_MCS_EVENT_RX_SECTAG_V_EQ1;
+ break;
+ case MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT:
+ desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+ desc.subtype = ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1;
+ break;
+ case MCS_CPM_RX_SECTAG_SL_GTE48_INT:
+ desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+ desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SL_GTE48;
+ break;
+ case MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT:
+ desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+ desc.subtype = ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1;
+ break;
+ case MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT:
+ desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+ desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1;
+ break;
+ /* SA PN expiry causes carry the affected SA index. */
+ case MCS_CPM_RX_PACKET_XPN_EQ0_INT:
+ desc.type = ROC_MCS_EVENT_RX_SA_PN_HARD_EXP;
+ desc.metadata.sa_idx = info->sa_id;
+ break;
+ case MCS_CPM_RX_PN_THRESH_REACHED_INT:
+ desc.type = ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP;
+ desc.metadata.sa_idx = info->sa_id;
+ break;
+ case MCS_CPM_TX_PACKET_XPN_EQ0_INT:
+ desc.type = ROC_MCS_EVENT_TX_SA_PN_HARD_EXP;
+ desc.metadata.sa_idx = info->sa_id;
+ break;
+ case MCS_CPM_TX_PN_THRESH_REACHED_INT:
+ desc.type = ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP;
+ desc.metadata.sa_idx = info->sa_id;
+ break;
+ case MCS_CPM_TX_SA_NOT_VALID_INT:
+ desc.type = ROC_MCS_EVENT_SA_NOT_VALID;
+ break;
+ /* Fatal FIFO overflow causes carry the affected lmac/port. */
+ case MCS_BBE_RX_DFIFO_OVERFLOW_INT:
+ case MCS_BBE_TX_DFIFO_OVERFLOW_INT:
+ desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+ desc.subtype = ROC_MCS_EVENT_DATA_FIFO_OVERFLOW;
+ desc.metadata.lmac_id = info->lmac_id;
+ break;
+ case MCS_BBE_RX_PLFIFO_OVERFLOW_INT:
+ case MCS_BBE_TX_PLFIFO_OVERFLOW_INT:
+ desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+ desc.subtype = ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW;
+ desc.metadata.lmac_id = info->lmac_id;
+ break;
+ case MCS_PAB_RX_CHAN_OVERFLOW_INT:
+ case MCS_PAB_TX_CHAN_OVERFLOW_INT:
+ desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+ desc.subtype = ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW;
+ desc.metadata.lmac_id = info->lmac_id;
+ break;
+ default:
+ /* Unknown/unmapped cause: ack without notifying anyone. */
+ goto exit;
+ }
+
+ mcs_event_cb_process(mcs, &desc);
+ }
+
+exit:
+ rsp->hdr.rc = 0;
+ return 0;
+}
+
static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -602,6 +687,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
return err; \
}
MBOX_UP_CGX_MESSAGES
+ MBOX_UP_MCS_MESSAGES
#undef M
}
@@ -316,6 +316,7 @@ struct mbox_msghdr {
M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, mcs_port_stats) \
M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+ M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, msg_rsp) \
@@ -324,9 +325,11 @@ struct mbox_msghdr {
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) \
M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_##_name = _id,
- MBOX_MESSAGES MBOX_UP_CGX_MESSAGES
+ MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES
#undef M
};
@@ -867,6 +870,38 @@ struct mcs_set_active_lmac {
uint64_t __io rsvd;
};
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+
+/* MCS_INTR_CFG mbox request: enable the interrupt causes selected in
+ * intr_mask (MCS_*_INT bits above) for the given mcs block.
+ */
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+ uint64_t __io intr_mask; /* Interrupt enable mask */
+ uint8_t __io mcs_id;
+};
+
+/* MCS_INTR_NOTIFY mbox payload sent by the AF on an MCS interrupt. */
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ uint64_t __io intr_mask; /* MCS_*_INT cause bit(s) */
+ int __io sa_id; /* consumed by the PN expiry causes */
+ uint8_t __io mcs_id;
+ uint8_t __io lmac_id; /* consumed by the FIFO overflow causes */
+ uint64_t __io rsvd;
+};
+
struct mcs_set_lmac_mode {
struct mbox_msghdr hdr;
uint8_t __io mode; /* '1' for internal bypass mode (passthrough), '0' for MCS processing */
@@ -5,6 +5,18 @@
#include "roc_api.h"
#include "roc_priv.h"
+/* Internal per-registration record for one user event callback.
+ * 'active' is set while the callback is executing so that
+ * roc_mcs_event_cb_unregister() does not free the entry mid-call.
+ */
+struct mcs_event_cb {
+ TAILQ_ENTRY(mcs_event_cb) next;
+ enum roc_mcs_event_type event;
+ roc_mcs_dev_cb_fn cb_fn;
+ void *cb_arg;
+ void *ret_param;
+ uint32_t active;
+};
+TAILQ_HEAD(mcs_event_cb_list, mcs_event_cb);
+
+/* The callback list head is carved out of roc_mcs->reserved[] right
+ * after struct mcs_priv (see roc_mcs_to_mcs_cb_list()); make sure the
+ * reserved area is large enough for both.
+ */
+PLT_STATIC_ASSERT(ROC_MCS_MEM_SZ >= (sizeof(struct mcs_priv) + sizeof(struct mcs_event_cb_list)));
+
int
roc_mcs_hw_info_get(struct roc_mcs_hw_info *hw_info)
{
@@ -109,6 +121,107 @@ roc_mcs_pn_threshold_set(struct roc_mcs *mcs, struct roc_mcs_set_pn_threshold *p
return mbox_process_msg(mcs->mbox, (void *)&rsp);
}
+/* Configure (enable) MCS interrupts in the AF.
+ *
+ * Sends an MCS_INTR_CFG mbox request carrying config->intr_mask for
+ * this mcs instance (mcs->idx). Returns 0 on success, -EINVAL for a
+ * NULL config, -ENOMEM if the mbox message cannot be allocated, or the
+ * mbox processing error code.
+ */
+int
+roc_mcs_intr_configure(struct roc_mcs *mcs, struct roc_mcs_intr_cfg *config)
+{
+ struct mcs_intr_cfg *req;
+ struct msg_rsp *rsp;
+
+ if (config == NULL)
+ return -EINVAL;
+
+ MCS_SUPPORT_CHECK;
+
+ req = mbox_alloc_msg_mcs_intr_cfg(mcs->mbox);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->intr_mask = config->intr_mask;
+ req->mcs_id = mcs->idx;
+
+ return mbox_process_msg(mcs->mbox, (void *)&rsp);
+}
+
+/* Register a user callback for the given event type.
+ *
+ * An exact duplicate (same event, cb_fn and cb_arg) is not re-added
+ * and the call returns 0. 'userdata' is stored on the mcs device
+ * itself, so it is shared by all registered callbacks and the most
+ * recent registration wins. Returns 0 on success, -EINVAL on NULL
+ * cb_fn/cb_arg/userdata, -ENOMEM on allocation failure.
+ */
+int
+roc_mcs_event_cb_register(struct roc_mcs *mcs, enum roc_mcs_event_type event,
+ roc_mcs_dev_cb_fn cb_fn, void *cb_arg, void *userdata)
+{
+ struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
+ struct mcs_event_cb *cb;
+
+ if (cb_fn == NULL || cb_arg == NULL || userdata == NULL)
+ return -EINVAL;
+
+ MCS_SUPPORT_CHECK;
+
+ /* Look for an existing identical registration; cb is NULL after the
+ * loop when none is found.
+ */
+ TAILQ_FOREACH (cb, cb_list, next) {
+ if (cb->cb_fn == cb_fn && cb->cb_arg == cb_arg && cb->event == event)
+ break;
+ }
+
+ if (cb == NULL) {
+ cb = plt_zmalloc(sizeof(struct mcs_event_cb), 0);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->cb_fn = cb_fn;
+ cb->cb_arg = cb_arg;
+ cb->event = event;
+ mcs->userdata = userdata;
+ TAILQ_INSERT_TAIL(cb_list, cb, next);
+ }
+
+ return 0;
+}
+
+/* Unregister every callback registered for 'event'.
+ *
+ * NOTE(review): if a matching callback is currently executing
+ * (cb->active set by mcs_event_cb_process()), this returns -EAGAIN
+ * immediately; matching entries removed earlier in the walk stay
+ * removed, so the unregistration can be partial -- confirm that
+ * callers retry on -EAGAIN.
+ */
+int
+roc_mcs_event_cb_unregister(struct roc_mcs *mcs, enum roc_mcs_event_type event)
+{
+ struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
+ struct mcs_event_cb *cb, *next;
+
+ MCS_SUPPORT_CHECK;
+
+ /* Safe traversal: 'next' is cached before a possible removal. */
+ for (cb = TAILQ_FIRST(cb_list); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->event != event)
+ continue;
+
+ if (cb->active == 0) {
+ TAILQ_REMOVE(cb_list, cb, next);
+ plt_free(cb);
+ } else {
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+/* Dispatch an event descriptor to every callback registered for
+ * desc->type.
+ *
+ * The entry is copied to a local before invocation and 'active' is set
+ * around the call so that roc_mcs_event_cb_unregister() cannot free
+ * the entry while its callback is running. Returns the last matching
+ * callback's return value, or 0 when none matched.
+ */
+int
+mcs_event_cb_process(struct roc_mcs *mcs, struct roc_mcs_event_desc *desc)
+{
+ struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
+ struct mcs_event_cb mcs_cb;
+ struct mcs_event_cb *cb;
+ int rc = 0;
+
+ TAILQ_FOREACH (cb, cb_list, next) {
+ if (cb->cb_fn == NULL || cb->event != desc->type)
+ continue;
+
+ mcs_cb = *cb;
+ cb->active = 1;
+ mcs_cb.ret_param = desc;
+
+ rc = mcs_cb.cb_fn(mcs->userdata, mcs_cb.ret_param, mcs_cb.cb_arg);
+ cb->active = 0;
+ }
+
+ return rc;
+}
+
static int
mcs_alloc_bmap(uint16_t entries, void **mem, struct plt_bitmap **bmap)
{
@@ -227,6 +340,7 @@ mcs_alloc_rsrc_bmap(struct roc_mcs *mcs)
struct roc_mcs *
roc_mcs_dev_init(uint8_t mcs_idx)
{
+ struct mcs_event_cb_list *cb_list;
struct roc_mcs *mcs;
struct npa_lf *npa;
@@ -255,6 +369,9 @@ roc_mcs_dev_init(uint8_t mcs_idx)
if (mcs_alloc_rsrc_bmap(mcs))
goto exit;
+ cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
+ TAILQ_INIT(cb_list);
+
roc_idev_mcs_set(mcs);
mcs->refcount++;
@@ -116,6 +116,34 @@ struct roc_mcs_hw_info {
uint64_t rsvd[16];
};
+#define ROC_MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define ROC_MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define ROC_MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define ROC_MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define ROC_MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define ROC_MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define ROC_MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define ROC_MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define ROC_MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define ROC_MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define ROC_MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define ROC_MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define ROC_MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define ROC_MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define ROC_MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define ROC_MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+
+/* Interrupt enable configuration passed to roc_mcs_intr_configure();
+ * intr_mask is a bitmask of the ROC_MCS_*_INT causes above.
+ */
+struct roc_mcs_intr_cfg {
+ uint64_t intr_mask; /* Interrupt enable mask */
+};
+
+/* Interrupt notification details (mirrors the mbox mcs_intr_info). */
+struct roc_mcs_intr_info {
+ uint64_t intr_mask;
+ int sa_id;
+ uint8_t lmac_id;
+ uint64_t rsvd;
+};
+
struct roc_mcs_set_lmac_mode {
uint8_t mode; /* '1' for internal bypass mode (passthrough), '0' for MCS processing */
uint8_t lmac_id;
@@ -205,6 +233,113 @@ struct roc_mcs_clear_stats {
uint8_t all; /* All resources stats mapped to PF are cleared */
};
+/* Finer-grained classification within a roc_mcs_event_type. */
+enum roc_mcs_event_subtype {
+ ROC_MCS_SUBEVENT_UNKNOWN,
+
+ /* subevents of ROC_MCS_EVENT_SECTAG_VAL_ERR sectag validation events
+ * ROC_MCS_EVENT_RX_SECTAG_V_EQ1
+ * Validation check: SecTag.TCI.V = 1
+ * ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1
+ * Validation check: SecTag.TCI.E = 0 && SecTag.TCI.C = 1
+ * ROC_MCS_EVENT_RX_SECTAG_SL_GTE48
+ * Validation check: SecTag.SL >= 'd48
+ * ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1
+ * Validation check: SecTag.TCI.ES = 1 && SecTag.TCI.SC = 1
+ * ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1
+ * Validation check: SecTag.TCI.SC = 1 && SecTag.TCI.SCB = 1
+ */
+ ROC_MCS_EVENT_RX_SECTAG_V_EQ1,
+ ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1,
+ ROC_MCS_EVENT_RX_SECTAG_SL_GTE48,
+ ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1,
+ ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1,
+
+ /* subevents of ROC_MCS_EVENT_FIFO_OVERFLOW error event
+ * ROC_MCS_EVENT_DATA_FIFO_OVERFLOW
+ * Notifies data FIFO overflow fatal error in BBE unit.
+ * ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW
+ * Notifies policy FIFO overflow fatal error in BBE unit.
+ * ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW
+ * Notifies output FIFO overflow fatal error in PAB unit.
+ */
+ ROC_MCS_EVENT_DATA_FIFO_OVERFLOW,
+ ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW,
+ ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW,
+};
+
+/* Event classes delivered to registered roc_mcs_dev_cb_fn callbacks;
+ * refined by roc_mcs_event_subtype where applicable.
+ */
+enum roc_mcs_event_type {
+ ROC_MCS_EVENT_UNKNOWN,
+
+ /* Notifies BBE_INT_DFIFO/PLFIFO_OVERFLOW or PAB_INT_OVERFLOW
+ * interrupts, it's a fatal error that causes packet corruption.
+ */
+ ROC_MCS_EVENT_FIFO_OVERFLOW,
+
+ /* Notifies CPM_RX_SECTAG_X validation error interrupt */
+ ROC_MCS_EVENT_SECTAG_VAL_ERR,
+ /* Notifies CPM_RX_PACKET_XPN_EQ0 (SecTag.PN == 0 in ingress) interrupt */
+ ROC_MCS_EVENT_RX_SA_PN_HARD_EXP,
+ /* Notifies CPM_RX_PN_THRESH_REACHED interrupt */
+ ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP,
+ /* Notifies CPM_TX_PACKET_XPN_EQ0 (PN wrapped in egress) interrupt */
+ ROC_MCS_EVENT_TX_SA_PN_HARD_EXP,
+ /* Notifies CPM_TX_PN_THRESH_REACHED interrupt */
+ ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP,
+ /* Notifies CPM_TX_SA_NOT_VALID interrupt */
+ ROC_MCS_EVENT_SA_NOT_VALID,
+ /* Notifies recovery of software driven port reset */
+ ROC_MCS_EVENT_PORT_RESET_RECOVERY,
+};
+
+union roc_mcs_event_data {
+ /* Valid for below events
+ * - ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP
+ * - ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP
+ * sa_idx is also populated by the mbox handler for
+ * ROC_MCS_EVENT_RX/TX_SA_PN_HARD_EXP.
+ *
+ * NOTE(review): sa_idx is uint8_t while the mbox reports sa_id as
+ * int -- SA indices >= 256 would be truncated; confirm the valid
+ * SA index range.
+ */
+ struct {
+ uint8_t secy_idx;
+ uint8_t sc_idx;
+ uint8_t sa_idx;
+ };
+ /* Valid for below event
+ * - ROC_MCS_EVENT_FIFO_OVERFLOW
+ *
+ * Upon fatal error notification on a MCS port, ROC driver resets below attributes of active
+ * flow entities(sc & sa) and then resets the port.
+ * - Reset NEXT_PN of active SAs to 1.
+ * - Reset TX active SA for each SC, TX_SA_ACTIVE = 0, SA_INDEX0_VLD = 1.
+ * - Clear SA_IN_USE for active ANs in RX_SA_MAP_MEM.
+ * - Clear all stats mapping to this port.
+ * - Reactivate SA_IN_USE for active ANs in RX_SA_MAP_MEM.
+ *
+ * ROC driver notifies the following flow entity(sc & sa) details in application callback,
+ * application is expected to exchange the Tx/Rx NEXT_PN, TX_SA_ACTIVE, active RX SC AN
+ * details with peer device so that peer device can reset its MACsec flow states and then
+ * resume packet transfers.
+ */
+ struct {
+ uint16_t *tx_sa_array; /* Tx SAs whose PN memories were reset (NEXT_PN=1) */
+ uint16_t *rx_sa_array; /* Rx SAs whose PN memories were reset (NEXT_PN=1) */
+ uint16_t *tx_sc_array; /* Tx SCs whose active SAs were reset (TX_SA_ACTIVE=0) */
+ uint16_t *rx_sc_array; /* Rx SCs whose state was reset */
+ uint8_t *sc_an_array; /* AN of Rx SCs(in rx_sc_array) which were reactivated */
+ uint8_t num_tx_sa; /* num entries in tx_sa_array */
+ uint8_t num_rx_sa; /* num entries in rx_sa_array */
+ uint8_t num_tx_sc; /* num entries in tx_sc_array */
+ uint8_t num_rx_sc; /* num entries in rx_sc_array */
+ uint8_t lmac_id; /* lmac_id/port which was recovered from fatal error */
+ };
+};
+
+/* Event descriptor delivered to the user callback: event class,
+ * optional finer subtype and event-specific metadata.
+ */
+struct roc_mcs_event_desc {
+ enum roc_mcs_event_type type;
+ enum roc_mcs_event_subtype subtype;
+ union roc_mcs_event_data metadata;
+};
+
+/** User application callback to be registered for any notifications from driver. */
+typedef int (*roc_mcs_dev_cb_fn)(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg);
+
struct roc_mcs {
TAILQ_ENTRY(roc_mcs) next;
struct plt_pci_device *pci_dev;
@@ -292,4 +427,13 @@ __roc_api int roc_mcs_port_stats_get(struct roc_mcs *mcs, struct roc_mcs_stats_r
/* Clear stats */
__roc_api int roc_mcs_stats_clear(struct roc_mcs *mcs, struct roc_mcs_clear_stats *mcs_req);
+/* Register user callback routines */
+__roc_api int roc_mcs_event_cb_register(struct roc_mcs *mcs, enum roc_mcs_event_type event,
+ roc_mcs_dev_cb_fn cb_fn, void *cb_arg, void *userdata);
+/* Unregister user callback routines */
+__roc_api int roc_mcs_event_cb_unregister(struct roc_mcs *mcs, enum roc_mcs_event_type event);
+
+/* Configure interrupts */
+__roc_api int roc_mcs_intr_configure(struct roc_mcs *mcs, struct roc_mcs_intr_cfg *config);
+
#endif /* _ROC_MCS_H_ */
@@ -62,4 +62,12 @@ roc_mcs_to_mcs_priv(struct roc_mcs *roc_mcs)
return (struct mcs_priv *)&roc_mcs->reserved[0];
}
+/* Return the event-callback list head, stored inside
+ * roc_mcs->reserved[] immediately after struct mcs_priv (the layout is
+ * guarded by a PLT_STATIC_ASSERT in roc_mcs.c).
+ */
+static inline void *
+roc_mcs_to_mcs_cb_list(struct roc_mcs *roc_mcs)
+{
+ return (void *)((uintptr_t)roc_mcs->reserved + sizeof(struct mcs_priv));
+}
+
+/* Internal: dispatch an event to all callbacks registered for its type. */
+int mcs_event_cb_process(struct roc_mcs *mcs, struct roc_mcs_event_desc *desc);
+
#endif /* _ROC_MCS_PRIV_H_ */
@@ -139,11 +139,14 @@ INTERNAL {
roc_mcs_dev_init;
roc_mcs_dev_fini;
roc_mcs_dev_get;
+ roc_mcs_event_cb_register;
+ roc_mcs_event_cb_unregister;
roc_mcs_flowid_entry_enable;
roc_mcs_flowid_entry_read;
roc_mcs_flowid_entry_write;
roc_mcs_flowid_stats_get;
roc_mcs_hw_info_get;
+ roc_mcs_intr_configure;
roc_mcs_lmac_mode_set;
roc_mcs_pn_table_write;
roc_mcs_pn_table_read;