get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
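A minimal read-only client sketch (not part of the Patchwork page itself): the endpoint URL and the "name"/"state" fields are copied from the response shown below, while the format=json override is an assumption based on standard Django REST Framework content negotiation (the browsable page below uses format=api).

import json
import urllib.request

# Patch detail endpoint taken from the response below; "format=json" is an
# assumed DRF query override -- the browsable page itself uses "format=api".
URL = "http://patchwork.dpdk.org/api/patches/133394/?format=json"

with urllib.request.urlopen(URL) as resp:
    patch = json.load(resp)

print(patch["name"])   # "[v9,05/34] ml/cnxk: add generic cnxk xstats structures"
print(patch["state"])  # "accepted"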

GET /api/patches/133394/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133394,
    "url": "http://patchwork.dpdk.org/api/patches/133394/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20231026124347.22477-6-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231026124347.22477-6-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231026124347.22477-6-syalavarthi@marvell.com",
    "date": "2023-10-26T12:43:14",
    "name": "[v9,05/34] ml/cnxk: add generic cnxk xstats structures",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "7c46fda81b15ce83e83cd33e87b0a530bae84d9a",
    "submitter": {
        "id": 2480,
        "url": "http://patchwork.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patchwork.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20231026124347.22477-6-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 30002,
            "url": "http://patchwork.dpdk.org/api/series/30002/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=30002",
            "date": "2023-10-26T12:43:09",
            "name": "Implementation of revised ml/cnxk driver",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/30002/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/133394/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/133394/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BBE5243208;\n\tThu, 26 Oct 2023 14:45:18 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CA15E42E50;\n\tThu, 26 Oct 2023 14:44:07 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id F084F427D7\n for <dev@dpdk.org>; Thu, 26 Oct 2023 14:43:57 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 39QALk04007523 for <dev@dpdk.org>; Thu, 26 Oct 2023 05:43:57 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3txcsr25pc-5\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 26 Oct 2023 05:43:57 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Thu, 26 Oct 2023 05:43:53 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Thu, 26 Oct 2023 05:43:53 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 571453F70C1;\n Thu, 26 Oct 2023 05:43:53 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=WiNwT4bXEUxK8THHOZAw7j+9Shy7CBpAYJSSF9hpUME=;\n b=GOjdvs1zKnx0hCZRoHicuL6JHsoFbYXJOb5b4uxofYAN0O5nJN8HFIV0BQkMZepLpPuh\n ffbbGkTxh4oxR6vhKsLyvt3YAiIv3Vgveph4xKx0v5y1RsxMB3HZpJJgcWKTnnUIJl0C\n FoYSo6Lh/78KCpLxKxD90EDLKh6LDcJipp0Oh7T03QW1IAZ5U8hFjn0YN2B1aV09QFnh\n 4Ee18dyR4Gn4EzouwZ0THE+KCvqjOYfW8vzHo2/j4k112WQvmQwCYEYxvmMSyY+fJUND\n W84O7s1cQtk5knwFZyLu0i25N4FfresEnr+vp2YgE54Mwu774LJGvt/CRoI6gzulqbEN 6w==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v9 05/34] ml/cnxk: add generic cnxk xstats structures",
        "Date": "Thu, 26 Oct 2023 05:43:14 -0700",
        "Message-ID": "<20231026124347.22477-6-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20231026124347.22477-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20231026124347.22477-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "U_JhxRpqAmfiaOAPzzg2IuxSq_-nEb1-",
        "X-Proofpoint-GUID": "U_JhxRpqAmfiaOAPzzg2IuxSq_-nEb1-",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.987,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-10-26_10,2023-10-26_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Introduced generic xstats structures and renamed cn10k\nxstats enumerations with cnxk prefix.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_dev.h   |  86 +---------------\n drivers/ml/cnxk/cn10k_ml_model.h |   6 +-\n drivers/ml/cnxk/cn10k_ml_ops.c   | 169 ++++++++++++++-----------------\n drivers/ml/cnxk/cnxk_ml_xstats.h | 128 +++++++++++++++++++++++\n 4 files changed, 209 insertions(+), 180 deletions(-)\n create mode 100644 drivers/ml/cnxk/cnxk_ml_xstats.h",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h\nindex 1852d4f6c9..be989e0a20 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.h\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.h\n@@ -10,6 +10,7 @@\n #include \"cn10k_ml_ocm.h\"\n \n #include \"cnxk_ml_io.h\"\n+#include \"cnxk_ml_xstats.h\"\n \n /* Dummy Device ops */\n extern struct rte_ml_dev_ops ml_dev_dummy_ops;\n@@ -121,89 +122,6 @@ struct cn10k_ml_fw {\n \tstruct cnxk_ml_req *req;\n };\n \n-/* Extended stats types enum */\n-enum cn10k_ml_xstats_type {\n-\t/* Number of models loaded */\n-\tnb_models_loaded,\n-\n-\t/* Number of models unloaded */\n-\tnb_models_unloaded,\n-\n-\t/* Number of models started */\n-\tnb_models_started,\n-\n-\t/* Number of models stopped */\n-\tnb_models_stopped,\n-\n-\t/* Average inference hardware latency */\n-\tavg_hw_latency,\n-\n-\t/* Minimum hardware latency */\n-\tmin_hw_latency,\n-\n-\t/* Maximum hardware latency */\n-\tmax_hw_latency,\n-\n-\t/* Average firmware latency */\n-\tavg_fw_latency,\n-\n-\t/* Minimum firmware latency */\n-\tmin_fw_latency,\n-\n-\t/* Maximum firmware latency */\n-\tmax_fw_latency,\n-};\n-\n-/* Extended stats function type enum. */\n-enum cn10k_ml_xstats_fn_type {\n-\t/* Device function */\n-\tCN10K_ML_XSTATS_FN_DEVICE,\n-\n-\t/* Model function */\n-\tCN10K_ML_XSTATS_FN_MODEL,\n-};\n-\n-/* Function pointer to get xstats for a type */\n-typedef uint64_t (*cn10k_ml_xstats_fn)(struct rte_ml_dev *dev, uint16_t obj_idx,\n-\t\t\t\t       enum cn10k_ml_xstats_type stat);\n-\n-/* Extended stats entry structure */\n-struct cn10k_ml_xstats_entry {\n-\t/* Name-ID map */\n-\tstruct rte_ml_dev_xstats_map map;\n-\n-\t/* xstats mode, device or model */\n-\tenum rte_ml_dev_xstats_mode mode;\n-\n-\t/* Type of xstats */\n-\tenum cn10k_ml_xstats_type type;\n-\n-\t/* xstats function */\n-\tenum cn10k_ml_xstats_fn_type fn_id;\n-\n-\t/* Object ID, model ID for model stat type */\n-\tuint16_t obj_idx;\n-\n-\t/* Allowed to reset the stat */\n-\tuint8_t reset_allowed;\n-\n-\t/* An offset to be taken away to emulate resets */\n-\tuint64_t reset_value;\n-};\n-\n-/* Extended stats data */\n-struct cn10k_ml_xstats {\n-\t/* Pointer to xstats entries */\n-\tstruct cn10k_ml_xstats_entry *entries;\n-\n-\t/* Store num stats and offset of the stats for each model */\n-\tuint16_t count_per_model[ML_CNXK_MAX_MODELS];\n-\tuint16_t offset_for_model[ML_CNXK_MAX_MODELS];\n-\tuint16_t count_mode_device;\n-\tuint16_t count_mode_model;\n-\tuint16_t count;\n-};\n-\n /* Device private data */\n struct cn10k_ml_dev {\n \t/* Device ROC */\n@@ -216,7 +134,7 @@ struct cn10k_ml_dev {\n \tstruct cn10k_ml_ocm ocm;\n \n \t/* Extended stats data */\n-\tstruct cn10k_ml_xstats xstats;\n+\tstruct cnxk_ml_xstats xstats;\n \n \t/* Enable / disable model data caching */\n \tint cache_model_data;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h\nindex 74ada1531a..5c32f48c68 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.h\n+++ b/drivers/ml/cnxk/cn10k_ml_model.h\n@@ -404,7 +404,7 @@ struct cn10k_ml_layer_addr {\n };\n \n /* Model fast-path stats */\n-struct cn10k_ml_layer_stats {\n+struct cn10k_ml_layer_xstats {\n \t/* Total hardware latency, sum of all inferences */\n \tuint64_t hw_latency_tot;\n \n@@ -447,10 +447,10 @@ struct cn10k_ml_layer_data {\n \tstruct cnxk_ml_req *req;\n \n \t/* Layer: Stats for burst ops */\n-\tstruct cn10k_ml_layer_stats *burst_stats;\n+\tstruct cn10k_ml_layer_xstats *burst_xstats;\n \n \t/* Layer: Stats for sync ops */\n-\tstruct cn10k_ml_layer_stats 
*sync_stats;\n+\tstruct cn10k_ml_layer_xstats *sync_xstats;\n };\n \n struct cn10k_ml_model_data {\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 25ebb28993..b470955ffd 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -10,6 +10,7 @@\n #include \"cnxk_ml_dev.h\"\n #include \"cnxk_ml_model.h\"\n #include \"cnxk_ml_ops.h\"\n+#include \"cnxk_ml_xstats.h\"\n \n /* ML model macros */\n #define CN10K_ML_MODEL_MEMZONE_NAME \"ml_cn10k_model_mz\"\n@@ -425,26 +426,6 @@ cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml\n \treq->cn10k_req.jd.model_run.num_batches = op->nb_batches;\n }\n \n-struct xstat_info {\n-\tchar name[32];\n-\tenum cn10k_ml_xstats_type type;\n-\tuint8_t reset_allowed;\n-};\n-\n-/* Note: Device stats are not allowed to be reset. */\n-static const struct xstat_info device_stats[] = {\n-\t{\"nb_models_loaded\", nb_models_loaded, 0},\n-\t{\"nb_models_unloaded\", nb_models_unloaded, 0},\n-\t{\"nb_models_started\", nb_models_started, 0},\n-\t{\"nb_models_stopped\", nb_models_stopped, 0},\n-};\n-\n-static const struct xstat_info model_stats[] = {\n-\t{\"Avg-HW-Latency\", avg_hw_latency, 1}, {\"Min-HW-Latency\", min_hw_latency, 1},\n-\t{\"Max-HW-Latency\", max_hw_latency, 1}, {\"Avg-FW-Latency\", avg_fw_latency, 1},\n-\t{\"Min-FW-Latency\", min_fw_latency, 1}, {\"Max-FW-Latency\", max_fw_latency, 1},\n-};\n-\n static int\n cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n {\n@@ -459,10 +440,10 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n \tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \t/* Allocate memory for xstats entries. Don't allocate during reconfigure */\n-\tnb_stats = RTE_DIM(device_stats) + ML_CNXK_MAX_MODELS * RTE_DIM(model_stats);\n+\tnb_stats = RTE_DIM(device_xstats) + ML_CNXK_MAX_MODELS * RTE_DIM(layer_xstats);\n \tif (cn10k_mldev->xstats.entries == NULL)\n \t\tcn10k_mldev->xstats.entries = rte_zmalloc(\n-\t\t\t\"cn10k_ml_xstats\", sizeof(struct cn10k_ml_xstats_entry) * nb_stats,\n+\t\t\t\"cn10k_ml_xstats\", sizeof(struct cnxk_ml_xstats_entry) * nb_stats,\n \t\t\tPLT_CACHE_LINE_SIZE);\n \n \tif (cn10k_mldev->xstats.entries == NULL)\n@@ -470,17 +451,17 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n \n \t/* Initialize device xstats */\n \tstat_id = 0;\n-\tfor (i = 0; i < RTE_DIM(device_stats); i++) {\n+\tfor (i = 0; i < RTE_DIM(device_xstats); i++) {\n \t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n \t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n \t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s\",\n-\t\t\t device_stats[i].name);\n+\t\t\t device_xstats[i].name);\n \n \t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;\n-\t\tcn10k_mldev->xstats.entries[stat_id].type = device_stats[i].type;\n-\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;\n+\t\tcn10k_mldev->xstats.entries[stat_id].type = device_xstats[i].type;\n+\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_DEVICE;\n \t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = 0;\n-\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;\n+\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed = device_xstats[i].reset_allowed;\n \t\tstat_id++;\n \t}\n \tcn10k_mldev->xstats.count_mode_device = stat_id;\n@@ -489,24 +470,24 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n \tfor (model = 0; model < ML_CNXK_MAX_MODELS; model++) {\n \t\tcn10k_mldev->xstats.offset_for_model[model] = 
stat_id;\n \n-\t\tfor (i = 0; i < RTE_DIM(model_stats); i++) {\n+\t\tfor (i = 0; i < RTE_DIM(layer_xstats); i++) {\n \t\t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n \t\t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].type = model_stats[i].type;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].type = layer_xstats[i].type;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_MODEL;\n \t\t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = model;\n \t\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed =\n-\t\t\t\tmodel_stats[i].reset_allowed;\n+\t\t\t\tlayer_xstats[i].reset_allowed;\n \n \t\t\t/* Name of xstat is updated during model load */\n \t\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n \t\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name),\n-\t\t\t\t \"Model-%u-%s\", model, model_stats[i].name);\n+\t\t\t\t \"Model-%u-%s\", model, layer_xstats[i].name);\n \n \t\t\tstat_id++;\n \t\t}\n \n-\t\tcn10k_mldev->xstats.count_per_model[model] = RTE_DIM(model_stats);\n+\t\tcn10k_mldev->xstats.count_per_model[model] = RTE_DIM(layer_xstats);\n \t}\n \n \tcn10k_mldev->xstats.count_mode_model = stat_id - cn10k_mldev->xstats.count_mode_device;\n@@ -545,7 +526,7 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n \tcnxk_mldev = dev->data->dev_private;\n \tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tmodel = dev->data->models[model_id];\n-\tstat_id = RTE_DIM(device_stats) + model_id * RTE_DIM(model_stats);\n+\tstat_id = RTE_DIM(device_xstats) + model_id * RTE_DIM(layer_xstats);\n \n \troc_clk_freq_get(&rclk_freq, &sclk_freq);\n \tif (sclk_freq == 0)\n@@ -554,17 +535,17 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n \t\tstrcpy(suffix, \"ns\");\n \n \t/* Update xstat name based on model name and sclk availability */\n-\tfor (i = 0; i < RTE_DIM(model_stats); i++) {\n+\tfor (i = 0; i < RTE_DIM(layer_xstats); i++) {\n \t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n \t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n-\t\t\t model->layer[0].glow.metadata.model.name, model_stats[i].name, suffix);\n+\t\t\t model->layer[0].glow.metadata.model.name, layer_xstats[i].name, suffix);\n \t\tstat_id++;\n \t}\n }\n \n static uint64_t\n cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n-\t\t       enum cn10k_ml_xstats_type type)\n+\t\t       enum cnxk_ml_xstats_type type)\n {\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \n@@ -590,9 +571,9 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n \tdo {                                                                                       \\\n \t\tvalue = 0;                                                                         \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tvalue += model->layer[0].glow.burst_stats[qp_id].str##_latency_tot;        \\\n-\t\t\tcount += model->layer[0].glow.burst_stats[qp_id].dequeued_count -          \\\n-\t\t\t\t model->layer[0].glow.burst_stats[qp_id].str##_reset_count;        \\\n+\t\t\tvalue += model->layer[0].glow.burst_xstats[qp_id].str##_latency_tot;       \\\n+\t\t\tcount += model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n+\t\t\t\t model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n \t\t}        
                                                                          \\\n \t\tif (count != 0)                                                                    \\\n \t\t\tvalue = value / count;                                                     \\\n@@ -603,9 +584,10 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n \t\tvalue = UINT64_MAX;                                                                \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n \t\t\tvalue = PLT_MIN(                                                           \\\n-\t\t\t\tvalue, model->layer[0].glow.burst_stats[qp_id].str##_latency_min); \\\n-\t\t\tcount += model->layer[0].glow.burst_stats[qp_id].dequeued_count -          \\\n-\t\t\t\t model->layer[0].glow.burst_stats[qp_id].str##_reset_count;        \\\n+\t\t\t\tvalue,                                                             \\\n+\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_min);       \\\n+\t\t\tcount += model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n+\t\t\t\t model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n \t\t}                                                                                  \\\n \t\tif (count == 0)                                                                    \\\n \t\t\tvalue = 0;                                                                 \\\n@@ -616,16 +598,17 @@ cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n \t\tvalue = 0;                                                                         \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n \t\t\tvalue = PLT_MAX(                                                           \\\n-\t\t\t\tvalue, model->layer[0].glow.burst_stats[qp_id].str##_latency_max); \\\n-\t\t\tcount += model->layer[0].glow.burst_stats[qp_id].dequeued_count -          \\\n-\t\t\t\t model->layer[0].glow.burst_stats[qp_id].str##_reset_count;        \\\n+\t\t\t\tvalue,                                                             \\\n+\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_max);       \\\n+\t\t\tcount += model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n+\t\t\t\t model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n \t\t}                                                                                  \\\n \t\tif (count == 0)                                                                    \\\n \t\t\tvalue = 0;                                                                 \\\n \t} while (0)\n \n static uint64_t\n-cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cn10k_ml_xstats_type type)\n+cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cnxk_ml_xstats_type type)\n {\n \tstruct cnxk_ml_model *model;\n \tuint16_t rclk_freq; /* MHz */\n@@ -671,8 +654,8 @@ cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cn10k_ml\n static int\n cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[], uint16_t nb_ids)\n {\n-\tstruct cn10k_ml_xstats_entry *xs;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint16_t nb_stats;\n \tuint16_t stat_id;\n@@ -708,26 +691,26 @@ cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[],\n #define ML_AVG_RESET_FOREACH_QP(dev, model, 
qp_id, str)                                            \\\n \tdo {                                                                                       \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tmodel->layer[0].glow.burst_stats[qp_id].str##_latency_tot = 0;             \\\n-\t\t\tmodel->layer[0].glow.burst_stats[qp_id].str##_reset_count =                \\\n-\t\t\t\tmodel->layer[0].glow.burst_stats[qp_id].dequeued_count;            \\\n+\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_tot = 0;            \\\n+\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_reset_count =               \\\n+\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].dequeued_count;           \\\n \t\t}                                                                                  \\\n \t} while (0)\n \n #define ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \\\n \tdo {                                                                                       \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \\\n-\t\t\tmodel->layer[0].glow.burst_stats[qp_id].str##_latency_min = UINT64_MAX;    \\\n+\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_min = UINT64_MAX;   \\\n \t} while (0)\n \n #define ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \\\n \tdo {                                                                                       \\\n \t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \\\n-\t\t\tmodel->layer[0].glow.burst_stats[qp_id].str##_latency_max = 0;             \\\n+\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_max = 0;            \\\n \t} while (0)\n \n static void\n-cn10k_ml_reset_model_stat(struct rte_ml_dev *dev, uint16_t model_id, enum cn10k_ml_xstats_type type)\n+cn10k_ml_reset_model_stat(struct rte_ml_dev *dev, uint16_t model_id, enum cnxk_ml_xstats_type type)\n {\n \tstruct cnxk_ml_model *model;\n \tuint32_t qp_id;\n@@ -762,8 +745,8 @@ static int\n cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint16_t stat_ids[],\n \t\t\t    uint16_t nb_ids)\n {\n-\tstruct cn10k_ml_xstats_entry *xs;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tint32_t lcl_model_id = 0;\n@@ -1342,10 +1325,10 @@ static int\n cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,\n \t\t\t\tuint64_t *value)\n {\n-\tstruct cn10k_ml_xstats_entry *xs;\n+\tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n-\tcn10k_ml_xstats_fn fn;\n+\tcnxk_ml_xstats_fn fn;\n \tuint32_t i;\n \n \tcnxk_mldev = dev->data->dev_private;\n@@ -1357,10 +1340,10 @@ cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16\n \t\t\t\t*stat_id = xs->map.id;\n \n \t\t\tswitch (xs->fn_id) {\n-\t\t\tcase CN10K_ML_XSTATS_FN_DEVICE:\n+\t\t\tcase CNXK_ML_XSTATS_FN_DEVICE:\n \t\t\t\tfn = cn10k_ml_dev_xstat_get;\n \t\t\t\tbreak;\n-\t\t\tcase CN10K_ML_XSTATS_FN_MODEL:\n+\t\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n \t\t\t\tfn = cn10k_ml_model_xstat_get;\n \t\t\t\tbreak;\n \t\t\tdefault:\n@@ -1384,11 +1367,11 @@ static int\n cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,\n \t\t\tconst uint16_t stat_ids[], uint64_t 
values[], uint16_t nb_ids)\n {\n-\tstruct cn10k_ml_xstats_entry *xs;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint32_t xstats_mode_count;\n-\tcn10k_ml_xstats_fn fn;\n+\tcnxk_ml_xstats_fn fn;\n \tuint64_t val;\n \tuint32_t idx;\n \tuint32_t i;\n@@ -1423,10 +1406,10 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode\n \t\t}\n \n \t\tswitch (xs->fn_id) {\n-\t\tcase CN10K_ML_XSTATS_FN_DEVICE:\n+\t\tcase CNXK_ML_XSTATS_FN_DEVICE:\n \t\t\tfn = cn10k_ml_dev_xstat_get;\n \t\t\tbreak;\n-\t\tcase CN10K_ML_XSTATS_FN_MODEL:\n+\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n \t\t\tfn = cn10k_ml_model_xstat_get;\n \t\t\tbreak;\n \t\tdefault:\n@@ -1664,7 +1647,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \t\t\t  metadata->model.num_input * sizeof(struct rte_ml_io_info) +\n \t\t\t  metadata->model.num_output * sizeof(struct rte_ml_io_info);\n \tmodel_info_size = PLT_ALIGN_CEIL(model_info_size, ML_CN10K_ALIGN_SIZE);\n-\tmodel_stats_size = (dev->data->nb_queue_pairs + 1) * sizeof(struct cn10k_ml_layer_stats);\n+\tmodel_stats_size = (dev->data->nb_queue_pairs + 1) * sizeof(struct cn10k_ml_layer_xstats);\n \n \tmz_size = PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_model), ML_CN10K_ALIGN_SIZE) +\n \t\t  2 * model_data_size + model_scratch_size + model_info_size +\n@@ -1738,24 +1721,24 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \tmodel->layer[0].glow.req = PLT_PTR_ADD(model->info, model_info_size);\n \n \t/* Reset burst and sync stats */\n-\tmodel->layer[0].glow.burst_stats =\n+\tmodel->layer[0].glow.burst_xstats =\n \t\tPLT_PTR_ADD(model->layer[0].glow.req,\n \t\t\t    PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_req), ML_CN10K_ALIGN_SIZE));\n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs + 1; qp_id++) {\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].hw_latency_tot = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].hw_latency_min = UINT64_MAX;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].hw_latency_max = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].fw_latency_tot = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].fw_latency_min = UINT64_MAX;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].fw_latency_max = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].hw_reset_count = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].fw_reset_count = 0;\n-\t\tmodel->layer[0].glow.burst_stats[qp_id].dequeued_count = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].hw_latency_tot = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].hw_latency_min = UINT64_MAX;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].hw_latency_max = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].fw_latency_tot = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].fw_latency_min = UINT64_MAX;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].fw_latency_max = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].hw_reset_count = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].fw_reset_count = 0;\n+\t\tmodel->layer[0].glow.burst_xstats[qp_id].dequeued_count = 0;\n \t}\n \n-\tmodel->layer[0].glow.sync_stats =\n-\t\tPLT_PTR_ADD(model->layer[0].glow.burst_stats,\n-\t\t\t    dev->data->nb_queue_pairs * sizeof(struct cn10k_ml_layer_stats));\n+\tmodel->layer[0].glow.sync_xstats =\n+\t\tPLT_PTR_ADD(model->layer[0].glow.burst_xstats,\n+\t\t\t    dev->data->nb_queue_pairs * sizeof(struct cn10k_ml_layer_xstats));\n \n \tplt_spinlock_init(&model->lock);\n \tmodel->state = ML_CNXK_MODEL_STATE_LOADED;\n@@ 
-2308,7 +2291,7 @@ static __rte_always_inline void\n cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *req)\n {\n \tunion cn10k_ml_error_code *error_code;\n-\tstruct cn10k_ml_layer_stats *stats;\n+\tstruct cn10k_ml_layer_xstats *xstats;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_result *result;\n@@ -2326,31 +2309,31 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *re\n \t\tif (likely(qp_id >= 0)) {\n \t\t\tqp = dev->data->queue_pairs[qp_id];\n \t\t\tqp->stats.dequeued_count++;\n-\t\t\tstats = &model->layer[0].glow.burst_stats[qp_id];\n+\t\t\txstats = &model->layer[0].glow.burst_xstats[qp_id];\n \t\t} else {\n-\t\t\tstats = model->layer[0].glow.sync_stats;\n+\t\t\txstats = model->layer[0].glow.sync_xstats;\n \t\t}\n \n-\t\tif (unlikely(stats->dequeued_count == stats->hw_reset_count)) {\n-\t\t\tstats->hw_latency_min = UINT64_MAX;\n-\t\t\tstats->hw_latency_max = 0;\n+\t\tif (unlikely(xstats->dequeued_count == xstats->hw_reset_count)) {\n+\t\t\txstats->hw_latency_min = UINT64_MAX;\n+\t\t\txstats->hw_latency_max = 0;\n \t\t}\n \n-\t\tif (unlikely(stats->dequeued_count == stats->fw_reset_count)) {\n-\t\t\tstats->fw_latency_min = UINT64_MAX;\n-\t\t\tstats->fw_latency_max = 0;\n+\t\tif (unlikely(xstats->dequeued_count == xstats->fw_reset_count)) {\n+\t\t\txstats->fw_latency_min = UINT64_MAX;\n+\t\t\txstats->fw_latency_max = 0;\n \t\t}\n \n \t\thw_latency = result->stats.hw_end - result->stats.hw_start;\n \t\tfw_latency = result->stats.fw_end - result->stats.fw_start - hw_latency;\n \n-\t\tstats->hw_latency_tot += hw_latency;\n-\t\tstats->hw_latency_min = PLT_MIN(stats->hw_latency_min, hw_latency);\n-\t\tstats->hw_latency_max = PLT_MAX(stats->hw_latency_max, hw_latency);\n-\t\tstats->fw_latency_tot += fw_latency;\n-\t\tstats->fw_latency_min = PLT_MIN(stats->fw_latency_min, fw_latency);\n-\t\tstats->fw_latency_max = PLT_MAX(stats->fw_latency_max, fw_latency);\n-\t\tstats->dequeued_count++;\n+\t\txstats->hw_latency_tot += hw_latency;\n+\t\txstats->hw_latency_min = PLT_MIN(xstats->hw_latency_min, hw_latency);\n+\t\txstats->hw_latency_max = PLT_MAX(xstats->hw_latency_max, hw_latency);\n+\t\txstats->fw_latency_tot += fw_latency;\n+\t\txstats->fw_latency_min = PLT_MIN(xstats->fw_latency_min, fw_latency);\n+\t\txstats->fw_latency_max = PLT_MAX(xstats->fw_latency_max, fw_latency);\n+\t\txstats->dequeued_count++;\n \n \t\top->impl_opaque = result->error_code;\n \t\top->status = RTE_ML_OP_STATUS_SUCCESS;\ndiff --git a/drivers/ml/cnxk/cnxk_ml_xstats.h b/drivers/ml/cnxk/cnxk_ml_xstats.h\nnew file mode 100644\nindex 0000000000..0d405679ca\n--- /dev/null\n+++ b/drivers/ml/cnxk/cnxk_ml_xstats.h\n@@ -0,0 +1,128 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#ifndef _CNXK_ML_XSTATS_H_\n+#define _CNXK_ML_XSTATS_H_\n+\n+#include \"cnxk_ml_io.h\"\n+\n+/* Extended stats types enum */\n+enum cnxk_ml_xstats_type {\n+\t/* Number of models loaded */\n+\tnb_models_loaded,\n+\n+\t/* Number of models unloaded */\n+\tnb_models_unloaded,\n+\n+\t/* Number of models started */\n+\tnb_models_started,\n+\n+\t/* Number of models stopped */\n+\tnb_models_stopped,\n+\n+\t/* Average inference hardware latency */\n+\tavg_hw_latency,\n+\n+\t/* Minimum hardware latency */\n+\tmin_hw_latency,\n+\n+\t/* Maximum hardware latency */\n+\tmax_hw_latency,\n+\n+\t/* Average firmware latency */\n+\tavg_fw_latency,\n+\n+\t/* Minimum firmware latency */\n+\tmin_fw_latency,\n+\n+\t/* Maximum 
firmware latency */\n+\tmax_fw_latency,\n+\n+\t/* Average runtime latency */\n+\tavg_rt_latency,\n+\n+\t/* Minimum runtime latency */\n+\tmin_rt_latency,\n+\n+\t/* Maximum runtime latency */\n+\tmax_rt_latency,\n+};\n+\n+/* Extended stats function type enum. */\n+enum cnxk_ml_xstats_fn_type {\n+\t/* Device function */\n+\tCNXK_ML_XSTATS_FN_DEVICE,\n+\n+\t/* Model function */\n+\tCNXK_ML_XSTATS_FN_MODEL,\n+};\n+\n+/* Function pointer to get xstats for a type */\n+typedef uint64_t (*cnxk_ml_xstats_fn)(struct rte_ml_dev *cnxk_mldev, uint16_t obj_idx,\n+\t\t\t\t      enum cnxk_ml_xstats_type stat);\n+\n+/* Extended stats entry structure */\n+struct cnxk_ml_xstats_entry {\n+\t/* Name-ID map */\n+\tstruct rte_ml_dev_xstats_map map;\n+\n+\t/* xstats mode, device or model */\n+\tenum rte_ml_dev_xstats_mode mode;\n+\n+\t/* Type of xstats */\n+\tenum cnxk_ml_xstats_type type;\n+\n+\t/* xstats function */\n+\tenum cnxk_ml_xstats_fn_type fn_id;\n+\n+\t/* Object ID, model ID for model stat type */\n+\tuint16_t obj_idx;\n+\n+\t/* Layer ID, valid for model stat type */\n+\tint32_t layer_id;\n+\n+\t/* Allowed to reset the stat */\n+\tuint8_t reset_allowed;\n+\n+\t/* An offset to be taken away to emulate resets */\n+\tuint64_t reset_value;\n+};\n+\n+/* Extended stats data */\n+struct cnxk_ml_xstats {\n+\t/* Pointer to xstats entries */\n+\tstruct cnxk_ml_xstats_entry *entries;\n+\n+\t/* Store num stats and offset of the stats for each model */\n+\tuint16_t count_per_model[ML_CNXK_MAX_MODELS];\n+\tuint16_t offset_for_model[ML_CNXK_MAX_MODELS];\n+\tuint16_t count_per_layer[ML_CNXK_MAX_MODELS][ML_CNXK_MODEL_MAX_LAYERS];\n+\tuint16_t offset_for_layer[ML_CNXK_MAX_MODELS][ML_CNXK_MODEL_MAX_LAYERS];\n+\tuint16_t count_mode_device;\n+\tuint16_t count_mode_model;\n+\tuint16_t count;\n+};\n+\n+struct cnxk_ml_xstat_info {\n+\tchar name[32];\n+\tenum cnxk_ml_xstats_type type;\n+\tuint8_t reset_allowed;\n+};\n+\n+/* Device xstats. Note: Device stats are not allowed to be reset. */\n+static const struct cnxk_ml_xstat_info device_xstats[] = {\n+\t{\"nb_models_loaded\", nb_models_loaded, 0},\n+\t{\"nb_models_unloaded\", nb_models_unloaded, 0},\n+\t{\"nb_models_started\", nb_models_started, 0},\n+\t{\"nb_models_stopped\", nb_models_stopped, 0},\n+};\n+\n+/* Layer xstats */\n+static const struct cnxk_ml_xstat_info layer_xstats[] = {\n+\t{\"Avg-HW-Latency\", avg_hw_latency, 1}, {\"Min-HW-Latency\", min_hw_latency, 1},\n+\t{\"Max-HW-Latency\", max_hw_latency, 1}, {\"Avg-FW-Latency\", avg_fw_latency, 1},\n+\t{\"Min-FW-Latency\", min_fw_latency, 1}, {\"Max-FW-Latency\", max_fw_latency, 1},\n+};\n+\n+#endif /* _CNXK_ML_XSTATS_H_ */\n",
    "prefixes": [
        "v9",
        "05/34"
    ]
}