get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch.
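
Write operations require authentication; anonymous requests can only read. The sketch below is a minimal illustration rather than an excerpt from the Patchwork documentation: it assumes the Python requests library, token authentication via an "Authorization: Token ..." header, a placeholder MY_TOKEN value, and that fields such as "state" and "archived" are writable for users with maintainer rights on the project. PATCH changes only the supplied fields, whereas PUT expects the complete set of writable fields.

import requests

# Placeholder token -- obtain a real API token from your Patchwork profile.
MY_TOKEN = "0123456789abcdef0123456789abcdef"

url = "http://patchwork.dpdk.org/api/patches/50189/"
headers = {"Authorization": "Token " + MY_TOKEN}

# Partial update: only "state" and "archived" are touched; every other
# field keeps its current value.
resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": True})
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])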

GET /api/patches/50189/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 50189,
    "url": "http://patchwork.dpdk.org/api/patches/50189/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1549541873-17403-25-git-send-email-arybchenko@solarflare.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1549541873-17403-25-git-send-email-arybchenko@solarflare.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1549541873-17403-25-git-send-email-arybchenko@solarflare.com",
    "date": "2019-02-07T12:17:47",
    "name": "[24/30] net/sfc: move RxQ shared information to adapter shared",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "6edeafe6b5c0fc398cb7e3390d4745423cdaddba",
    "submitter": {
        "id": 607,
        "url": "http://patchwork.dpdk.org/api/people/607/?format=api",
        "name": "Andrew Rybchenko",
        "email": "arybchenko@solarflare.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1549541873-17403-25-git-send-email-arybchenko@solarflare.com/mbox/",
    "series": [
        {
            "id": 3407,
            "url": "http://patchwork.dpdk.org/api/series/3407/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=3407",
            "date": "2019-02-07T12:17:25",
            "name": "net/sfc: improve multi-process support",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/3407/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/50189/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/50189/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 9BF6B1B57E;\n\tThu,  7 Feb 2019 13:18:47 +0100 (CET)",
            "from dispatch1-us1.ppe-hosted.com (dispatch1-us1.ppe-hosted.com\n\t[148.163.129.52]) by dpdk.org (Postfix) with ESMTP id 3AE041B4CB\n\tfor <dev@dpdk.org>; Thu,  7 Feb 2019 13:18:19 +0100 (CET)",
            "from webmail.solarflare.com (webmail.solarflare.com\n\t[12.187.104.26])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby mx1-us3.ppe-hosted.com (Proofpoint Essentials ESMTP Server) with\n\tESMTPS id\n\tEEA8360005B for <dev@dpdk.org>; Thu,  7 Feb 2019 12:18:17 +0000 (UTC)",
            "from ocex03.SolarFlarecom.com (10.20.40.36) by\n\tocex03.SolarFlarecom.com (10.20.40.36) with Microsoft SMTP Server\n\t(TLS) id 15.0.1395.4; Thu, 7 Feb 2019 04:18:10 -0800",
            "from opal.uk.solarflarecom.com (10.17.10.1) by\n\tocex03.SolarFlarecom.com (10.20.40.36) with Microsoft SMTP Server\n\t(TLS) id\n\t15.0.1395.4 via Frontend Transport; Thu, 7 Feb 2019 04:18:10 -0800",
            "from ukv-loginhost.uk.solarflarecom.com\n\t(ukv-loginhost.uk.solarflarecom.com [10.17.10.39])\n\tby opal.uk.solarflarecom.com (8.13.8/8.13.8) with ESMTP id\n\tx17CI9RQ013297 for <dev@dpdk.org>; Thu, 7 Feb 2019 12:18:09 GMT",
            "from ukv-loginhost.uk.solarflarecom.com (localhost [127.0.0.1])\n\tby ukv-loginhost.uk.solarflarecom.com (Postfix) with ESMTP id\n\t5921D1613E4\n\tfor <dev@dpdk.org>; Thu,  7 Feb 2019 12:18:09 +0000 (GMT)"
        ],
        "X-Virus-Scanned": "Proofpoint Essentials engine",
        "From": "Andrew Rybchenko <arybchenko@solarflare.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Thu, 7 Feb 2019 12:17:47 +0000",
        "Message-ID": "<1549541873-17403-25-git-send-email-arybchenko@solarflare.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1549541873-17403-1-git-send-email-arybchenko@solarflare.com>",
        "References": "<1549541873-17403-1-git-send-email-arybchenko@solarflare.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-TM-AS-Product-Ver": "SMEX-12.5.0.1300-8.5.1010-24412.006",
        "X-TM-AS-Result": "No-2.425600-4.000000-10",
        "X-TMASE-MatchedRID": "8KqAZX9NEj5JI0Xw8qTX20f49ONH0RaSSjyMfjCRfaPfUZT83lbkEGOr\n\tKvsUT0pALYh4sdP0rP1jtX6mE5W/EKF5AEN4ATBvEhGH3CRdKUUhotH7bEpEMl2VSkpYHvFvv7G\n\tDbroLG9pcbjId9W6SOXZinMYfnxA9oLWVTedAnK+QOktEo73GFJqCl1R34jDPUfLlptXrqAn/hC\n\tYJ6P1MgoaMJnQigimyHoIOUmHREgsLazoQyrpm0nV7tdtvoiba8kp9oxB/3WVeCiaGE+TES6PFj\n\tJEFr+olwXCBO/GKkVqOhzOa6g8KrRWICukF+ps2RUTchcuUKOkEns4IiXTiCWn0+qlxeH9xsIBX\n\tOmkEfwOzmxMlxBuq7D0/7W0LYmn1VZCd7X8/fE2sWHjrfIB4LzcjwNTI6hp5T7Ak0q8bodDU82S\n\tjOFFjcdQ1HcCHDEjsHkxb8IED4reUTGVAhB5EbQ==",
        "X-TM-AS-User-Approved-Sender": "No",
        "X-TM-AS-User-Blocked-Sender": "No",
        "X-TMASE-Result": "10-2.425600-4.000000",
        "X-TMASE-Version": "SMEX-12.5.0.1300-8.5.1010-24412.006",
        "X-MDID": "1549541898-Sf2wU4TlRAWR",
        "Subject": "[dpdk-dev] [PATCH 24/30] net/sfc: move RxQ shared information to\n\tadapter shared",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Prepare to make sfc_adapter primary process private data.\n\nSigned-off-by: Andrew Rybchenko <arybchenko@solarflare.com>\n---\n drivers/net/sfc/sfc.h        |  11 +++-\n drivers/net/sfc/sfc_ethdev.c |  22 +++++---\n drivers/net/sfc/sfc_flow.c   |   6 +-\n drivers/net/sfc/sfc_rx.c     | 105 ++++++++++++++++++-----------------\n 4 files changed, 80 insertions(+), 64 deletions(-)",
    "diff": "diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h\nindex 890a04d0e..21568f959 100644\n--- a/drivers/net/sfc/sfc.h\n+++ b/drivers/net/sfc/sfc.h\n@@ -176,6 +176,9 @@ struct sfc_rss {\n \n /* Adapter private data shared by primary and secondary processes */\n struct sfc_adapter_shared {\n+\tunsigned int\t\t\trxq_count;\n+\tstruct sfc_rxq_info\t\t*rxq_info;\n+\n \tstruct rte_pci_addr\t\tpci_addr;\n \tuint16_t\t\t\tport_id;\n \n@@ -271,8 +274,6 @@ struct sfc_adapter {\n \tbool\t\t\t\tmgmt_evq_running;\n \tstruct sfc_evq\t\t\t*mgmt_evq;\n \n-\tunsigned int\t\t\trxq_count;\n-\tstruct sfc_rxq_info\t\t*rxq_info;\n \tstruct sfc_rxq\t\t\t*rxq_ctrl;\n \n \tunsigned int\t\t\ttxq_count;\n@@ -294,6 +295,12 @@ sfc_adapter_shared_by_eth_dev(struct rte_eth_dev *eth_dev)\n \treturn sa->priv.shared;\n }\n \n+static inline struct sfc_adapter_shared *\n+sfc_sa2shared(struct sfc_adapter *sa)\n+{\n+\treturn sa->priv.shared;\n+}\n+\n /*\n  * Add wrapper functions to acquire/release lock to be able to remove or\n  * change the lock in one place.\ndiff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c\nindex e30217cf1..a84690bb0 100644\n--- a/drivers/net/sfc/sfc_ethdev.c\n+++ b/drivers/net/sfc/sfc_ethdev.c\n@@ -402,6 +402,7 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\t   const struct rte_eth_rxconf *rx_conf,\n \t\t   struct rte_mempool *mb_pool)\n {\n+\tstruct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);\n \tstruct sfc_adapter *sa = dev->data->dev_private;\n \tint rc;\n \n@@ -415,7 +416,7 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \tif (rc != 0)\n \t\tgoto fail_rx_qinit;\n \n-\tdev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].dp;\n+\tdev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;\n \n \tsfc_adapter_unlock(sa);\n \n@@ -1067,14 +1068,15 @@ static void\n sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\t      struct rte_eth_rxq_info *qinfo)\n {\n+\tstruct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);\n \tstruct sfc_adapter *sa = dev->data->dev_private;\n \tstruct sfc_rxq_info *rxq_info;\n \n \tsfc_adapter_lock(sa);\n \n-\tSFC_ASSERT(rx_queue_id < sa->rxq_count);\n+\tSFC_ASSERT(rx_queue_id < sas->rxq_count);\n \n-\trxq_info = &sa->rxq_info[rx_queue_id];\n+\trxq_info = &sas->rxq_info[rx_queue_id];\n \n \tqinfo->mp = rxq_info->refill_mb_pool;\n \tqinfo->conf.rx_free_thresh = rxq_info->refill_threshold;\n@@ -1125,11 +1127,11 @@ static uint32_t\n sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n \tconst struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);\n-\tstruct sfc_adapter *sa = dev->data->dev_private;\n+\tstruct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);\n \tstruct sfc_rxq_info *rxq_info;\n \n-\tSFC_ASSERT(rx_queue_id < sa->rxq_count);\n-\trxq_info = &sa->rxq_info[rx_queue_id];\n+\tSFC_ASSERT(rx_queue_id < sas->rxq_count);\n+\trxq_info = &sas->rxq_info[rx_queue_id];\n \n \tif ((rxq_info->state & SFC_RXQ_STARTED) == 0)\n \t\treturn 0;\n@@ -1185,6 +1187,7 @@ sfc_tx_descriptor_status(void *queue, uint16_t offset)\n static int\n sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n+\tstruct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);\n \tstruct sfc_adapter *sa = dev->data->dev_private;\n \tint rc;\n \n@@ -1196,14 +1199,14 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tif (sa->state != SFC_ADAPTER_STARTED)\n \t\tgoto 
fail_not_started;\n \n-\tif (sa->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)\n+\tif (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)\n \t\tgoto fail_not_setup;\n \n \trc = sfc_rx_qstart(sa, rx_queue_id);\n \tif (rc != 0)\n \t\tgoto fail_rx_qstart;\n \n-\tsa->rxq_info[rx_queue_id].deferred_started = B_TRUE;\n+\tsas->rxq_info[rx_queue_id].deferred_started = B_TRUE;\n \n \tsfc_adapter_unlock(sa);\n \n@@ -1220,6 +1223,7 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n static int\n sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n+\tstruct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);\n \tstruct sfc_adapter *sa = dev->data->dev_private;\n \n \tsfc_log_init(sa, \"RxQ=%u\", rx_queue_id);\n@@ -1227,7 +1231,7 @@ sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tsfc_adapter_lock(sa);\n \tsfc_rx_qstop(sa, rx_queue_id);\n \n-\tsa->rxq_info[rx_queue_id].deferred_started = B_FALSE;\n+\tsas->rxq_info[rx_queue_id].deferred_started = B_FALSE;\n \n \tsfc_adapter_unlock(sa);\n \ndiff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c\nindex e20c2e612..ab5f24f51 100644\n--- a/drivers/net/sfc/sfc_flow.c\n+++ b/drivers/net/sfc/sfc_flow.c\n@@ -1241,7 +1241,7 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,\n {\n \tstruct sfc_rxq *rxq;\n \n-\tif (queue->index >= sa->rxq_count)\n+\tif (queue->index >= sfc_sa2shared(sa)->rxq_count)\n \t\treturn -EINVAL;\n \n \trxq = &sa->rxq_ctrl[queue->index];\n@@ -1268,7 +1268,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,\n \tif (action_rss->queue_num == 0)\n \t\treturn -EINVAL;\n \n-\trxq_sw_index = sa->rxq_count - 1;\n+\trxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;\n \trxq = &sa->rxq_ctrl[rxq_sw_index];\n \trxq_hw_index_min = rxq->hw_index;\n \trxq_hw_index_max = 0;\n@@ -1276,7 +1276,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,\n \tfor (i = 0; i < action_rss->queue_num; ++i) {\n \t\trxq_sw_index = action_rss->queue[i];\n \n-\t\tif (rxq_sw_index >= sa->rxq_count)\n+\t\tif (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)\n \t\t\treturn -EINVAL;\n \n \t\trxq = &sa->rxq_ctrl[rxq_sw_index];\ndiff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c\nindex eb4875fec..8af9d2148 100644\n--- a/drivers/net/sfc/sfc_rx.c\n+++ b/drivers/net/sfc/sfc_rx.c\n@@ -381,15 +381,15 @@ sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)\n {\n \tconst struct sfc_dp_queue *dpq = &dp_rxq->dpq;\n \tstruct rte_eth_dev *eth_dev;\n-\tstruct sfc_adapter *sa;\n+\tstruct sfc_adapter_shared *sas;\n \n \tSFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));\n \teth_dev = &rte_eth_devices[dpq->port_id];\n \n-\tsa = eth_dev->data->dev_private;\n+\tsas = sfc_adapter_shared_by_eth_dev(eth_dev);\n \n-\tSFC_ASSERT(dpq->queue_id < sa->rxq_count);\n-\treturn &sa->rxq_info[dpq->queue_id];\n+\tSFC_ASSERT(dpq->queue_id < sas->rxq_count);\n+\treturn &sas->rxq_info[dpq->queue_id];\n }\n \n struct sfc_rxq *\n@@ -404,7 +404,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)\n \n \tsa = eth_dev->data->dev_private;\n \n-\tSFC_ASSERT(dpq->queue_id < sa->rxq_count);\n+\tSFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);\n \treturn &sa->rxq_ctrl[dpq->queue_id];\n }\n \n@@ -567,7 +567,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)\n \tunsigned int wait_count;\n \tint rc;\n \n-\trxq_info = &sa->rxq_info[sw_index];\n+\trxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];\n \tSFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);\n \n \trxq = &sa->rxq_ctrl[sw_index];\n@@ -677,9 +677,9 @@ 
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)\n \n \tsfc_log_init(sa, \"sw_index=%u\", sw_index);\n \n-\tSFC_ASSERT(sw_index < sa->rxq_count);\n+\tSFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);\n \n-\trxq_info = &sa->rxq_info[sw_index];\n+\trxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];\n \tSFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);\n \n \trxq = &sa->rxq_ctrl[sw_index];\n@@ -766,9 +766,9 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)\n \n \tsfc_log_init(sa, \"sw_index=%u\", sw_index);\n \n-\tSFC_ASSERT(sw_index < sa->rxq_count);\n+\tSFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);\n \n-\trxq_info = &sa->rxq_info[sw_index];\n+\trxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];\n \n \tif (rxq_info->state == SFC_RXQ_INITIALIZED)\n \t\treturn;\n@@ -1007,8 +1007,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,\n \t\tgoto fail_bad_conf;\n \t}\n \n-\tSFC_ASSERT(sw_index < sa->rxq_count);\n-\trxq_info = &sa->rxq_info[sw_index];\n+\tSFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);\n+\trxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];\n \n \tSFC_ASSERT(rxq_entries <= rxq_info->max_entries);\n \trxq_info->entries = rxq_entries;\n@@ -1098,10 +1098,10 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)\n \tstruct sfc_rxq_info *rxq_info;\n \tstruct sfc_rxq *rxq;\n \n-\tSFC_ASSERT(sw_index < sa->rxq_count);\n+\tSFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);\n \tsa->eth_dev->data->rx_queues[sw_index] = NULL;\n \n-\trxq_info = &sa->rxq_info[sw_index];\n+\trxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];\n \n \tSFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);\n \n@@ -1345,10 +1345,11 @@ sfc_rx_rss_config(struct sfc_adapter *sa)\n int\n sfc_rx_start(struct sfc_adapter *sa)\n {\n+\tstruct sfc_adapter_shared * const sas = sfc_sa2shared(sa);\n \tunsigned int sw_index;\n \tint rc;\n \n-\tsfc_log_init(sa, \"rxq_count=%u\", sa->rxq_count);\n+\tsfc_log_init(sa, \"rxq_count=%u\", sas->rxq_count);\n \n \trc = efx_rx_init(sa->nic);\n \tif (rc != 0)\n@@ -1358,10 +1359,10 @@ sfc_rx_start(struct sfc_adapter *sa)\n \tif (rc != 0)\n \t\tgoto fail_rss_config;\n \n-\tfor (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {\n-\t\tif (sa->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&\n-\t\t    (!sa->rxq_info[sw_index].deferred_start ||\n-\t\t     sa->rxq_info[sw_index].deferred_started)) {\n+\tfor (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {\n+\t\tif (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&\n+\t\t    (!sas->rxq_info[sw_index].deferred_start ||\n+\t\t     sas->rxq_info[sw_index].deferred_started)) {\n \t\t\trc = sfc_rx_qstart(sa, sw_index);\n \t\t\tif (rc != 0)\n \t\t\t\tgoto fail_rx_qstart;\n@@ -1385,13 +1386,14 @@ sfc_rx_start(struct sfc_adapter *sa)\n void\n sfc_rx_stop(struct sfc_adapter *sa)\n {\n+\tstruct sfc_adapter_shared * const sas = sfc_sa2shared(sa);\n \tunsigned int sw_index;\n \n-\tsfc_log_init(sa, \"rxq_count=%u\", sa->rxq_count);\n+\tsfc_log_init(sa, \"rxq_count=%u\", sas->rxq_count);\n \n-\tsw_index = sa->rxq_count;\n+\tsw_index = sas->rxq_count;\n \twhile (sw_index-- > 0) {\n-\t\tif (sa->rxq_info[sw_index].state & SFC_RXQ_STARTED)\n+\t\tif (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)\n \t\t\tsfc_rx_qstop(sa, sw_index);\n \t}\n \n@@ -1401,7 +1403,8 @@ sfc_rx_stop(struct sfc_adapter *sa)\n static int\n sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)\n {\n-\tstruct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];\n+\tstruct sfc_adapter_shared * const sas = 
sfc_sa2shared(sa);\n+\tstruct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];\n \tunsigned int max_entries;\n \n \tmax_entries = EFX_RXQ_MAXNDESCS;\n@@ -1463,17 +1466,18 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)\n static void\n sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)\n {\n+\tstruct sfc_adapter_shared * const sas = sfc_sa2shared(sa);\n \tint sw_index;\n \n-\tSFC_ASSERT(nb_rx_queues <= sa->rxq_count);\n+\tSFC_ASSERT(nb_rx_queues <= sas->rxq_count);\n \n-\tsw_index = sa->rxq_count;\n+\tsw_index = sas->rxq_count;\n \twhile (--sw_index >= (int)nb_rx_queues) {\n-\t\tif (sa->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)\n+\t\tif (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)\n \t\t\tsfc_rx_qfini(sa, sw_index);\n \t}\n \n-\tsa->rxq_count = nb_rx_queues;\n+\tsas->rxq_count = nb_rx_queues;\n }\n \n /**\n@@ -1487,27 +1491,28 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)\n int\n sfc_rx_configure(struct sfc_adapter *sa)\n {\n+\tstruct sfc_adapter_shared * const sas = sfc_sa2shared(sa);\n \tstruct sfc_rss *rss = &sa->rss;\n \tstruct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;\n \tconst unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;\n \tint rc;\n \n \tsfc_log_init(sa, \"nb_rx_queues=%u (old %u)\",\n-\t\t     nb_rx_queues, sa->rxq_count);\n+\t\t     nb_rx_queues, sas->rxq_count);\n \n \trc = sfc_rx_check_mode(sa, &dev_conf->rxmode);\n \tif (rc != 0)\n \t\tgoto fail_check_mode;\n \n-\tif (nb_rx_queues == sa->rxq_count)\n+\tif (nb_rx_queues == sas->rxq_count)\n \t\tgoto configure_rss;\n \n-\tif (sa->rxq_info == NULL) {\n+\tif (sas->rxq_info == NULL) {\n \t\trc = ENOMEM;\n-\t\tsa->rxq_info = rte_calloc_socket(\"sfc-rxqs\", nb_rx_queues,\n-\t\t\t\t\t\t sizeof(sa->rxq_info[0]), 0,\n-\t\t\t\t\t\t sa->socket_id);\n-\t\tif (sa->rxq_info == NULL)\n+\t\tsas->rxq_info = rte_calloc_socket(\"sfc-rxqs\", nb_rx_queues,\n+\t\t\t\t\t\t  sizeof(sas->rxq_info[0]), 0,\n+\t\t\t\t\t\t  sa->socket_id);\n+\t\tif (sas->rxq_info == NULL)\n \t\t\tgoto fail_rxqs_alloc;\n \n \t\t/*\n@@ -1522,13 +1527,13 @@ sfc_rx_configure(struct sfc_adapter *sa)\n \t\tstruct sfc_rxq_info *new_rxq_info;\n \t\tstruct sfc_rxq *new_rxq_ctrl;\n \n-\t\tif (nb_rx_queues < sa->rxq_count)\n+\t\tif (nb_rx_queues < sas->rxq_count)\n \t\t\tsfc_rx_fini_queues(sa, nb_rx_queues);\n \n \t\trc = ENOMEM;\n \t\tnew_rxq_info =\n-\t\t\trte_realloc(sa->rxq_info,\n-\t\t\t\t    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);\n+\t\t\trte_realloc(sas->rxq_info,\n+\t\t\t\t    nb_rx_queues * sizeof(sas->rxq_info[0]), 0);\n \t\tif (new_rxq_info == NULL && nb_rx_queues > 0)\n \t\t\tgoto fail_rxqs_realloc;\n \n@@ -1538,29 +1543,29 @@ sfc_rx_configure(struct sfc_adapter *sa)\n \t\tif (new_rxq_ctrl == NULL && nb_rx_queues > 0)\n \t\t\tgoto fail_rxqs_ctrl_realloc;\n \n-\t\tsa->rxq_info = new_rxq_info;\n+\t\tsas->rxq_info = new_rxq_info;\n \t\tsa->rxq_ctrl = new_rxq_ctrl;\n-\t\tif (nb_rx_queues > sa->rxq_count) {\n-\t\t\tmemset(&sa->rxq_info[sa->rxq_count], 0,\n-\t\t\t       (nb_rx_queues - sa->rxq_count) *\n-\t\t\t       sizeof(sa->rxq_info[0]));\n-\t\t\tmemset(&sa->rxq_ctrl[sa->rxq_count], 0,\n-\t\t\t       (nb_rx_queues - sa->rxq_count) *\n+\t\tif (nb_rx_queues > sas->rxq_count) {\n+\t\t\tmemset(&sas->rxq_info[sas->rxq_count], 0,\n+\t\t\t       (nb_rx_queues - sas->rxq_count) *\n+\t\t\t       sizeof(sas->rxq_info[0]));\n+\t\t\tmemset(&sa->rxq_ctrl[sas->rxq_count], 0,\n+\t\t\t       (nb_rx_queues - sas->rxq_count) *\n \t\t\t       sizeof(sa->rxq_ctrl[0]));\n 
\t\t}\n \t}\n \n-\twhile (sa->rxq_count < nb_rx_queues) {\n-\t\trc = sfc_rx_qinit_info(sa, sa->rxq_count);\n+\twhile (sas->rxq_count < nb_rx_queues) {\n+\t\trc = sfc_rx_qinit_info(sa, sas->rxq_count);\n \t\tif (rc != 0)\n \t\t\tgoto fail_rx_qinit_info;\n \n-\t\tsa->rxq_count++;\n+\t\tsas->rxq_count++;\n \t}\n \n configure_rss:\n \trss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?\n-\t\t\t MIN(sa->rxq_count, EFX_MAXRSS) : 0;\n+\t\t\t MIN(sas->rxq_count, EFX_MAXRSS) : 0;\n \n \tif (rss->channels > 0) {\n \t\tstruct rte_eth_rss_conf *adv_conf_rss;\n@@ -1607,6 +1612,6 @@ sfc_rx_close(struct sfc_adapter *sa)\n \tfree(sa->rxq_ctrl);\n \tsa->rxq_ctrl = NULL;\n \n-\trte_free(sa->rxq_info);\n-\tsa->rxq_info = NULL;\n+\trte_free(sfc_sa2shared(sa)->rxq_info);\n+\tsfc_sa2shared(sa)->rxq_info = NULL;\n }\n",
    "prefixes": [
        "24/30"
    ]
}
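
The JSON above carries everything needed to consume the patch programmatically: "mbox" points at the raw message so the patch can be applied with git am, "check" and "checks" summarise CI results, and "series" links back to the series the patch belongs to. Note that the ?format=api suffix selects the browsable HTML renderer; requesting the URL without it (or with ?format=json) returns plain JSON. The following is a minimal sketch assuming the Python requests library and anonymous read access; the "patches" field on the series object is an assumption about the series endpoint rather than something shown above.

import requests

api_url = "http://patchwork.dpdk.org/api/patches/50189/"

# Fetch the patch detail shown above as plain JSON.
patch = requests.get(api_url).json()
print(patch["name"], "-", patch["state"], "- check:", patch["check"])

# Download the raw mbox so the patch can be applied with `git am`.
mbox = requests.get(patch["mbox"])
with open("patch-50189.mbox", "wb") as f:
    f.write(mbox.content)

# Follow the series link to inspect the sibling patches in the same series.
series = requests.get(patch["series"][0]["url"]).json()
print(series["name"], "-", len(series.get("patches", [])), "patches listed")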