get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full replacement of the writable fields).
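For illustration, a minimal Python sketch of how a client might drive these endpoints, assuming the requests library. Reads need no authentication; writes require a Patchwork API token for an account with maintainer rights on the project, and the token string below is a placeholder, not a real credential:

import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 104726  # the patch shown in the response below

# get: show a patch (read access needs no authentication)
patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
print(patch["name"], patch["state"], patch["archived"])

# patch: partially update a patch -- only the listed fields change.
# The Authorization header uses Patchwork's token scheme; replace the
# placeholder with a real API token before running.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()

The GET request that follows shows the exact response such a read would return.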

GET /api/patches/104726/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104726,
    "url": "http://patchwork.dpdk.org/api/patches/104726/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211126195851.50167-1-jsoref@users.noreply.github.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211126195851.50167-1-jsoref@users.noreply.github.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211126195851.50167-1-jsoref@users.noreply.github.com",
    "date": "2021-11-26T19:58:51",
    "name": "Spelling",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "746a21f1ada8ea5b00a96a739a05c9b5283cc75c",
    "submitter": {
        "id": 2431,
        "url": "http://patchwork.dpdk.org/api/people/2431/?format=api",
        "name": "Josh Soref",
        "email": "jsoref@gmail.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211126195851.50167-1-jsoref@users.noreply.github.com/mbox/",
    "series": [
        {
            "id": 20795,
            "url": "http://patchwork.dpdk.org/api/series/20795/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20795",
            "date": "2021-11-26T19:58:51",
            "name": "Spelling",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/20795/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104726/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/104726/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A4207A0548;\n\tSun, 28 Nov 2021 13:57:03 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9BC4242732;\n\tSun, 28 Nov 2021 13:56:56 +0100 (CET)",
            "from mail-qk1-f179.google.com (mail-qk1-f179.google.com\n [209.85.222.179])\n by mails.dpdk.org (Postfix) with ESMTP id 96510406FF\n for <dev@dpdk.org>; Fri, 26 Nov 2021 20:59:18 +0100 (CET)",
            "by mail-qk1-f179.google.com with SMTP id p4so15842762qkm.7\n for <dev@dpdk.org>; Fri, 26 Nov 2021 11:59:18 -0800 (PST)",
            "from localhost.localdomain ([69.196.142.83])\n by smtp.gmail.com with ESMTPSA id q6sm3817612qkl.106.2021.11.26.11.59.03\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Fri, 26 Nov 2021 11:59:14 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112;\n h=from:to:cc:subject:date:message-id:mime-version\n :content-transfer-encoding;\n bh=le9xIFiCZY4jtZQrIf+oy675dPBJOncr/iM8qG0Y+qc=;\n b=M0z5wv7MbSrGCZKdz4PIVwaKj2889GKtdlSmbeAtvlquu3+/XHysTEmL/FgPkkwM2b\n 2hzdkbKFJ0oYNLDVCNZi0eSgCyJzkXn+G6I2FBt+VatIRSojcbtRdlFB0xJX72VjA+Du\n exnPPIkwUEhOOlMOZtNWel0DBsxkcm9hkABCrTjLE2f/La5/P+xqzXwEHVYnowQvHX4a\n uOtHF5Uw/lklZKjm0LP3EUEr+bVNxJHGIiJBQYAXv2F/P8by7aPAPKgWgcyC0MlAf09H\n UHxlcObSCFh+TY1CM1TgEi0TPdn2ycdbMtRQRwsDm0EANmEYW1hV18oUzox4WpyAVbhm\n V+zQ==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20210112;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:mime-version\n :content-transfer-encoding;\n bh=le9xIFiCZY4jtZQrIf+oy675dPBJOncr/iM8qG0Y+qc=;\n b=0/E0GVPt8UAGoGkJjAHpu8/0oQUHB5YGxeaL5K/KAxPSDmMP4+FmismQXukMf8lvnp\n vI0bsVn+0Y31lp5019Kwlg0r7pGX+UDH9dhsqLn4V1ki5Jo41ikpPgQ3YtnvPcWIOoBp\n FvU9xw/DgoMwBliUt40wm27iMqAxtqYVx/DMDiWt2MYHqAcko4k0JaWlsZ227HMBPhEW\n Ih3v9o/TqaMdv0BQaysqfY3VWwIJINlY8UV09QkxifL21ZDLo/LE1nnmdYKuM4m9UgQp\n NCqwgbzKTOoD2Irxl8vOt63yvKvJZKPzGtMT4i+Itz5YYFZAkZ523uHp1fcfwYrlJR6E\n yoSQ==",
        "X-Gm-Message-State": "AOAM533lZIlXkMhJcH4ThrufL50qDgk9ERt19p7ypHfPzgK6H66nbibs\n ixCqns6/+BossUNJ/ImpiSFbMYj3iMY=",
        "X-Google-Smtp-Source": "\n ABdhPJwy/ieOe35GQwkUjFGZyycQgEIB21GXv89Z+cQHrFXDjvZeQaZBorRUYJ6OnKbLkEt+gynmJg==",
        "X-Received": "by 2002:a05:620a:2a11:: with SMTP id\n o17mr23922101qkp.253.1637956755950;\n Fri, 26 Nov 2021 11:59:15 -0800 (PST)",
        "From": "Josh Soref <jsoref@gmail.com>",
        "X-Google-Original-From": "Josh Soref <jsoref@users.noreply.github.com>",
        "To": "dev@dpdk.org",
        "Cc": "Josh Soref <jsoref@users.noreply.github.com>",
        "Subject": "[PATCH] Spelling",
        "Date": "Fri, 26 Nov 2021 14:58:51 -0500",
        "Message-Id": "<20211126195851.50167-1-jsoref@users.noreply.github.com>",
        "X-Mailer": "git-send-email 2.30.1 (Apple Git-130)",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Mailman-Approved-At": "Sun, 28 Nov 2021 13:56:54 +0100",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "* 0x hex\n* acceptable\n* account\n* accounting\n* accumulate\n* accumulates\n* acknowledge\n* action\n* actions\n* activate\n* active\n* actively\n* actually\n* adapter\n* adaptive\n* adding\n* address\n* adjusted\n* aggregator\n* aggregators\n* aggressive\n* algorithm\n* alignment\n* allocate\n* allocated\n* allocation\n* alphabetically\n* although\n* always\n* annotation\n* approx\n* approximate\n* arbitrary\n* archive\n* argument\n* array\n* associated\n* assumption\n* attached\n* attempts\n* attributes\n* authentic\n* authentication\n* autogreen\n* available\n* average\n* backplane\n* backtracking\n* bandwidth\n* barrier\n* basically\n* bearer\n* before\n* begin\n* beginning\n* beyond\n* biggest\n* boundaries\n* buffer\n* buffers\n* but\n* by\n* calculate\n* calculations\n* cannot\n* capabilities\n* capability\n* chained\n* chaining\n* characteristics\n* checked\n* checks\n* checksum\n* choices\n* chunk\n* cipher\n* classes\n* classification\n* classifier\n* coalescing\n* command\n* commit\n* communicate\n* comparison\n* compatibility\n* completion\n* config\n* configuration\n* configurations\n* configure\n* configured\n* configuring\n* congestion\n* connected\n* connection\n* consecutive\n* constant\n* consumed\n* consumer\n* container\n* containing\n* contrary\n* conversion\n* corresponds\n* corruption\n* corrupts\n* couldn't\n* cprintf\n* crypto\n* cryptographic\n* current\n* currently\n* cycles\n* datapath\n* datastructure\n* decapsulation\n* default\n* deferred\n* defined\n* definition\n* definitions\n* deinitialization\n* delete\n* deletion\n* demonstrates\n* demonstrating\n* dependent\n* depends\n* dequeuing\n* derived\n* described\n* descriptor\n* descriptors\n* destination\n* destroy\n* destroying\n* detach\n* determine\n* determined\n* device\n* diagrams\n* differentiate\n* director\n* discard\n* discrimination\n* distributing\n* distribution\n* divergent\n* domain\n* doorbell\n* dropper\n* duplex\n* duplicate\n* effective\n* efficient\n* element\n* empty\n* enable\n* enabled\n* enabling\n* encapsulate\n* encoding\n* endian\n* enough\n* enqueued\n* entries\n* entry\n* equally\n* errno\n* erroneous\n* error\n* ethertype\n* exceed\n* exceeds\n* exclusively\n* executed\n* exede\n* exhaustion\n* expansion\n* expected\n* expects\n* experimental\n* expiry\n* explicit\n* extended\n* externally\n* failed\n* fairness\n* fallen\n* feature\n* fiber\n* fields\n* filters\n* filters' info\n* firmware\n* forwarding\n* fragment\n* framework\n* free\n* frequencies\n* frequency\n* functionality\n* further\n* generate\n* generator\n* geneve\n* global\n* greater\n* groupid\n* grpmask\n* guaranteed\n* handler\n* hardware\n* hash\n* header\n* hexadecimal\n* hierarchical\n* identical\n* identification\n* identifier\n* identifies\n* ignore\n* ignoring\n* immutable\n* implicitly\n* important\n* imprecise\n* inconsistent\n* incremented\n* incrementing\n* index\n* indexed\n* indicate\n* indicates\n* indication\n* individual\n* infiniband\n* information\n* inherent\n* inherited\n* initialization\n* initialize\n* insert\n* inserted\n* instant\n* instincts\n* instructions\n* insufficient\n* integrity\n* interim\n* internal\n* interrupt\n* interrupts\n* interval\n* intrinsics\n* invalid\n* involves\n* joining\n* kernel\n* lapsed\n* legacy\n* length\n* lengths\n* license\n* limited\n* little\n* lookup\n* loopback\n* mantissa\n* mapping\n* maximal\n* maximum\n* maxmsivector\n* measure\n* memory\n* message\n* metadata\n* metrics\n* minimize\n* mismatch\n* mmappable\n* mmapped\n* modify\n* 
moment\n* monitor\n* mult\n* multicore\n* multimatch\n* multiple\n* negative\n* negotiation\n* nonexistent\n* notification\n* notifications\n* observed\n* occupied\n* occurs\n* octeontx\n* odpvalid\n* offload\n* offloads\n* offset\n* operation\n* operations\n* order\n* other than\n* otherwise\n* overflow\n* overhead\n* override\n* overwritten\n* packet\n* packets\n* palladium\n* parallelism\n* parameter\n* parameters\n* partner\n* passive\n* pause\n* peer\n* pending\n* per port\n* period\n* permanent\n* personalities\n* physical\n* platform\n* pointer\n* points\n* policies\n* policy\n* polynomials\n* populate\n* portctl\n* postponed\n* preemptible\n* preference\n* prefetch\n* prefetching\n* prefix\n* preparation\n* prerequisite\n* present\n* preserve\n* previous\n* primary\n* prior\n* priorities\n* priority\n* probability\n* processed\n* processing\n* prodding\n* profile\n* programmatically\n* promisc\n* promiscuous\n* properties\n* protocol\n* provisioned\n* qgroup\n* quantity\n* queue\n* queueable\n* queues\n* quiescent\n* reasonably\n* reassemble\n* reassembly\n* recalculate\n* receive\n* receiving\n* recommended\n* redundant\n* reflected\n* register\n* registering\n* registers\n* registration\n* regular\n* release\n* relevant\n* remapped\n* removed\n* replace\n* request\n* required\n* reserved\n* resettable\n* resolution\n* resource\n* resources\n* respectively\n* response\n* restoration\n* resulting\n* results\n* retransmission\n* retrieval\n* retrieve\n* retrieving\n* return\n* revision\n* robust\n* routes\n* routines\n* scatter\n* scattered\n* scenario\n* schedule\n* search\n* searching\n* second\n* segment\n* segregating\n* selected\n* sequence\n* series\n* service\n* session\n* shaping\n* shift\n* signature\n* similar\n* simplify\n* simultaneously\n* single\n* situation\n* skeleton\n* slave\n* something\n* specific\n* specification\n* specified\n* specifies\n* staging\n* standalone\n* standard\n* state\n* statically\n* statistics\n* status for\n* strategy\n* string\n* structure\n* structures\n* submission\n* subsystem\n* subtraction\n* succeeded\n* successful\n* successfully\n* supplied\n* support\n* supported\n* synchronization\n* synthetic\n* tagged\n* technique\n* template\n* tentatively\n* terminate\n* termination\n* threshold\n* ticketlock\n* ticks\n* timestamp\n* together\n* token\n* traffic\n* translation\n* transmit\n* truncate\n* tunnel\n* tuple\n* typically\n* unavailable\n* unconditionally\n* unexpectedly\n* unfortunately\n* uninitialize\n* unrecognizable\n* unrecognized\n* unregistering\n* unsupported\n* until\n* up\n* update\n* usage\n* validation\n* values\n* variables\n* vector\n* vectors\n* verification\n* verify\n* violation\n* virtchnl\n* warnings\n* weight\n* where\n* wherever\n* whether\n* without\n* workaround\n* worker\n* written\n* xstats\n* zeroed\n\nSigned-off-by: Josh Soref <jsoref@users.noreply.github.com>\n---\n app/proc-info/main.c                          |  6 +-\n app/test-acl/main.c                           |  6 +-\n .../comp_perf_test_cyclecount.c               |  2 +-\n .../comp_perf_test_throughput.c               |  2 +-\n .../comp_perf_test_verify.c                   |  2 +-\n app/test-compress-perf/main.c                 |  2 +-\n .../cperf_test_pmd_cyclecount.c               |  2 +-\n app/test-crypto-perf/cperf_test_vectors.h     |  4 +-\n app/test-eventdev/evt_options.c               |  2 +-\n app/test-eventdev/test_order_common.c         |  2 +-\n app/test-fib/main.c                           |  4 +-\n app/test-flow-perf/config.h           
        |  2 +-\n app/test-flow-perf/main.c                     |  2 +-\n app/test-pmd/cmdline.c                        |  2 +-\n app/test-pmd/cmdline_flow.c                   |  6 +-\n app/test-pmd/cmdline_tm.c                     |  4 +-\n app/test-pmd/csumonly.c                       |  2 +-\n app/test-pmd/parameters.c                     |  2 +-\n app/test-pmd/testpmd.c                        |  2 +-\n app/test-pmd/txonly.c                         |  4 +-\n app/test/test_barrier.c                       |  2 +-\n app/test/test_bpf.c                           |  4 +-\n app/test/test_compressdev.c                   |  2 +-\n app/test/test_cryptodev.c                     |  2 +-\n app/test/test_fib_perf.c                      |  2 +-\n app/test/test_kni.c                           |  4 +-\n app/test/test_kvargs.c                        | 16 ++--\n app/test/test_link_bonding.c                  |  4 +-\n app/test/test_link_bonding_mode4.c            | 30 +++----\n app/test/test_lpm6_data.h                     |  2 +-\n app/test/test_member.c                        |  2 +-\n app/test/test_mempool.c                       |  4 +-\n app/test/test_memzone.c                       |  6 +-\n app/test/test_metrics.c                       |  2 +-\n app/test/test_pcapng.c                        |  2 +-\n app/test/test_power_cpufreq.c                 |  2 +-\n app/test/test_rcu_qsbr.c                      |  4 +-\n app/test/test_red.c                           |  8 +-\n app/test/test_security.c                      |  2 +-\n app/test/test_table.h                         |  2 +-\n app/test/test_table_pipeline.c                |  2 +-\n app/test/test_thash.c                         |  2 +-\n buildtools/binutils-avx512-check.py           |  2 +-\n devtools/check-symbol-change.sh               |  6 +-\n .../virtio_user_for_container_networking.svg  |  2 +-\n doc/guides/nics/af_packet.rst                 |  2 +-\n doc/guides/nics/mlx4.rst                      |  2 +-\n doc/guides/nics/mlx5.rst                      |  6 +-\n doc/guides/prog_guide/cryptodev_lib.rst       |  2 +-\n .../prog_guide/env_abstraction_layer.rst      |  4 +-\n doc/guides/prog_guide/img/turbo_tb_decode.svg |  2 +-\n doc/guides/prog_guide/img/turbo_tb_encode.svg |  2 +-\n doc/guides/prog_guide/qos_framework.rst       |  6 +-\n doc/guides/prog_guide/rte_flow.rst            |  2 +-\n doc/guides/rawdevs/cnxk_bphy.rst              |  2 +-\n doc/guides/regexdevs/features_overview.rst    |  2 +-\n doc/guides/rel_notes/release_16_07.rst        |  2 +-\n doc/guides/rel_notes/release_17_08.rst        |  2 +-\n doc/guides/rel_notes/release_2_1.rst          |  2 +-\n doc/guides/sample_app_ug/ip_reassembly.rst    |  2 +-\n doc/guides/sample_app_ug/l2_forward_cat.rst   |  2 +-\n doc/guides/sample_app_ug/server_node_efd.rst  |  2 +-\n doc/guides/sample_app_ug/skeleton.rst         |  2 +-\n .../sample_app_ug/vm_power_management.rst     |  2 +-\n doc/guides/testpmd_app_ug/testpmd_funcs.rst   |  2 +-\n drivers/baseband/acc100/rte_acc100_pmd.c      | 24 +++---\n drivers/baseband/fpga_lte_fec/fpga_lte_fec.c  |  8 +-\n drivers/baseband/null/bbdev_null.c            |  2 +-\n .../baseband/turbo_sw/bbdev_turbo_software.c  |  2 +-\n drivers/bus/dpaa/dpaa_bus.c                   |  2 +-\n drivers/bus/dpaa/include/fsl_qman.h           |  6 +-\n drivers/bus/dpaa/include/fsl_usd.h            |  2 +-\n drivers/bus/dpaa/include/process.h            |  2 +-\n drivers/bus/fslmc/fslmc_bus.c                 |  2 +-\n drivers/bus/fslmc/fslmc_vfio.h                |  2 
+-\n drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |  2 +-\n drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |  2 +-\n .../fslmc/qbman/include/fsl_qbman_portal.h    | 20 ++---\n drivers/bus/pci/linux/pci_vfio.c              |  2 +-\n drivers/bus/vdev/rte_bus_vdev.h               |  2 +-\n drivers/bus/vmbus/vmbus_common.c              |  2 +-\n drivers/common/cnxk/roc_bphy_cgx.c            |  2 +-\n drivers/common/cnxk/roc_cpt.c                 | 10 +--\n drivers/common/cnxk/roc_cpt_priv.h            |  2 +-\n drivers/common/cnxk/roc_mbox.h                |  4 +-\n drivers/common/cnxk/roc_nix_bpf.c             | 22 ++---\n drivers/common/cnxk/roc_nix_tm_ops.c          |  2 +-\n drivers/common/cnxk/roc_npc_mcam.c            |  2 +-\n drivers/common/cnxk/roc_npc_priv.h            |  2 +-\n drivers/common/cnxk/roc_tim.c                 |  2 +-\n drivers/common/cpt/cpt_ucode.h                |  4 +-\n drivers/common/cpt/cpt_ucode_asym.h           |  2 +-\n drivers/common/dpaax/caamflib/desc/algo.h     |  2 +-\n drivers/common/dpaax/caamflib/desc/ipsec.h    |  4 +-\n drivers/common/dpaax/caamflib/desc/sdap.h     |  6 +-\n .../common/dpaax/caamflib/rta/operation_cmd.h |  6 +-\n drivers/common/dpaax/dpaax_iova_table.c       |  2 +-\n drivers/common/iavf/iavf_type.h               |  2 +-\n drivers/common/iavf/virtchnl.h                |  2 +-\n drivers/common/mlx5/mlx5_common.c             |  2 +-\n drivers/common/mlx5/mlx5_common_mr.c          |  2 +-\n drivers/common/mlx5/mlx5_devx_cmds.c          |  2 +-\n drivers/common/mlx5/mlx5_devx_cmds.h          |  2 +-\n drivers/common/mlx5/mlx5_malloc.c             |  4 +-\n drivers/common/mlx5/mlx5_malloc.h             |  2 +-\n drivers/common/mlx5/mlx5_prm.h                |  6 +-\n drivers/common/mlx5/windows/mlx5_common_os.c  |  4 +-\n drivers/common/mlx5/windows/mlx5_common_os.h  |  2 +-\n drivers/common/octeontx2/otx2_mbox.h          |  4 +-\n .../qat/qat_adf/adf_transport_access_macros.h |  2 +-\n drivers/common/sfc_efx/efsys.h                |  2 +-\n drivers/compress/octeontx/include/zip_regs.h  |  4 +-\n drivers/compress/octeontx/otx_zip.h           |  4 +-\n drivers/compress/qat/dev/qat_comp_pmd_gen1.c  |  4 +-\n drivers/compress/qat/qat_comp.c               | 12 +--\n drivers/compress/qat/qat_comp_pmd.c           |  8 +-\n drivers/compress/qat/qat_comp_pmd.h           |  2 +-\n drivers/crypto/bcmfs/bcmfs_device.h           |  2 +-\n drivers/crypto/bcmfs/bcmfs_qp.c               |  2 +-\n drivers/crypto/bcmfs/bcmfs_sym_defs.h         |  6 +-\n drivers/crypto/bcmfs/bcmfs_sym_engine.h       |  2 +-\n drivers/crypto/bcmfs/hw/bcmfs5_rm.c           |  2 +-\n drivers/crypto/caam_jr/caam_jr.c              |  4 +-\n drivers/crypto/caam_jr/caam_jr_hw_specific.h  |  4 +-\n drivers/crypto/caam_jr/caam_jr_pvt.h          |  4 +-\n drivers/crypto/caam_jr/caam_jr_uio.c          |  2 +-\n drivers/crypto/ccp/ccp_crypto.c               |  2 +-\n drivers/crypto/ccp/ccp_crypto.h               |  2 +-\n drivers/crypto/ccp/ccp_dev.h                  |  2 +-\n drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |  4 +-\n drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   | 30 +++----\n drivers/crypto/dpaa_sec/dpaa_sec.c            |  4 +-\n .../crypto/octeontx/otx_cryptodev_hw_access.c |  2 +-\n drivers/crypto/octeontx/otx_cryptodev_mbox.h  |  2 +-\n drivers/crypto/octeontx/otx_cryptodev_ops.c   |  2 +-\n drivers/crypto/qat/qat_asym.c                 |  2 +-\n drivers/crypto/qat/qat_sym.c                  |  2 +-\n drivers/crypto/qat/qat_sym_session.h          |  2 +-\n 
drivers/crypto/virtio/virtio_cryptodev.c      |  6 +-\n drivers/crypto/virtio/virtqueue.c             |  2 +-\n drivers/crypto/virtio/virtqueue.h             |  4 +-\n drivers/dma/ioat/ioat_dmadev.c                |  2 +-\n drivers/dma/ioat/ioat_hw_defs.h               |  2 +-\n drivers/dma/skeleton/skeleton_dmadev.c        |  2 +-\n drivers/event/cnxk/cnxk_eventdev_selftest.c   |  4 +-\n drivers/event/dlb2/dlb2.c                     |  2 +-\n drivers/event/dlb2/dlb2_priv.h                |  2 +-\n drivers/event/dlb2/dlb2_selftest.c            |  2 +-\n drivers/event/dlb2/rte_pmd_dlb2.h             |  2 +-\n drivers/event/dpaa2/dpaa2_eventdev_selftest.c |  2 +-\n drivers/event/dsw/dsw_evdev.h                 |  4 +-\n drivers/event/dsw/dsw_event.c                 |  4 +-\n drivers/event/octeontx/ssovf_evdev.h          |  2 +-\n drivers/event/octeontx/ssovf_evdev_selftest.c |  2 +-\n drivers/event/octeontx2/otx2_evdev_selftest.c |  2 +-\n drivers/event/octeontx2/otx2_tim_evdev.c      |  4 +-\n drivers/event/octeontx2/otx2_worker_dual.h    |  2 +-\n drivers/event/opdl/opdl_evdev.c               |  2 +-\n drivers/event/opdl/opdl_test.c                |  2 +-\n drivers/event/sw/sw_evdev.h                   |  2 +-\n drivers/event/sw/sw_evdev_selftest.c          |  2 +-\n drivers/mempool/dpaa/dpaa_mempool.c           |  2 +-\n drivers/mempool/octeontx/octeontx_fpavf.c     |  4 +-\n drivers/net/ark/ark_ethdev.c                  |  4 +-\n drivers/net/ark/ark_global.h                  |  2 +-\n drivers/net/ark/ark_rqp.c                     |  4 +-\n drivers/net/ark/ark_rqp.h                     |  4 +-\n drivers/net/atlantic/atl_ethdev.c             |  2 +-\n drivers/net/atlantic/atl_rxtx.c               |  2 +-\n drivers/net/atlantic/hw_atl/hw_atl_b0.c       |  2 +-\n drivers/net/axgbe/axgbe_dev.c                 |  2 +-\n drivers/net/axgbe/axgbe_ethdev.c              |  2 +-\n drivers/net/axgbe/axgbe_ethdev.h              |  2 +-\n drivers/net/axgbe/axgbe_phy_impl.c            |  4 +-\n drivers/net/axgbe/axgbe_rxtx_vec_sse.c        |  2 +-\n drivers/net/bnx2x/bnx2x.c                     | 56 ++++++------\n drivers/net/bnx2x/bnx2x.h                     | 16 ++--\n drivers/net/bnx2x/bnx2x_stats.c               | 10 +--\n drivers/net/bnx2x/bnx2x_stats.h               |  8 +-\n drivers/net/bnx2x/bnx2x_vfpf.c                |  2 +-\n drivers/net/bnx2x/bnx2x_vfpf.h                |  2 +-\n drivers/net/bnx2x/ecore_fw_defs.h             |  2 +-\n drivers/net/bnx2x/ecore_hsi.h                 | 54 ++++++------\n drivers/net/bnx2x/ecore_init.h                |  2 +-\n drivers/net/bnx2x/ecore_init_ops.h            |  6 +-\n drivers/net/bnx2x/ecore_reg.h                 | 40 ++++-----\n drivers/net/bnx2x/ecore_sp.c                  | 42 ++++-----\n drivers/net/bnx2x/ecore_sp.h                  |  8 +-\n drivers/net/bnx2x/elink.c                     | 52 +++++------\n drivers/net/bnx2x/elink.h                     |  2 +-\n drivers/net/bnxt/bnxt_hwrm.c                  |  2 +-\n drivers/net/bnxt/tf_core/tfp.c                |  2 +-\n drivers/net/bnxt/tf_core/tfp.h                |  2 +-\n drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c    |  4 +-\n drivers/net/bonding/eth_bond_8023ad_private.h |  2 +-\n drivers/net/bonding/eth_bond_private.h        |  2 +-\n drivers/net/bonding/rte_eth_bond_8023ad.c     | 20 ++---\n drivers/net/bonding/rte_eth_bond_8023ad.h     |  4 +-\n drivers/net/bonding/rte_eth_bond_alb.h        |  2 +-\n drivers/net/bonding/rte_eth_bond_api.c        |  4 +-\n drivers/net/cnxk/cn10k_ethdev.h   
            |  2 +-\n drivers/net/cnxk/cn10k_tx.h                   |  6 +-\n drivers/net/cnxk/cn9k_tx.h                    |  6 +-\n drivers/net/cnxk/cnxk_ptp.c                   |  2 +-\n drivers/net/cxgbe/cxgbe_flow.c                |  2 +-\n drivers/net/cxgbe/cxgbevf_main.c              |  2 +-\n drivers/net/cxgbe/sge.c                       |  8 +-\n drivers/net/dpaa/dpaa_ethdev.c                |  6 +-\n drivers/net/dpaa/dpaa_rxtx.c                  |  4 +-\n drivers/net/dpaa/fmlib/fm_ext.h               |  2 +-\n drivers/net/dpaa/fmlib/fm_pcd_ext.h           |  8 +-\n drivers/net/dpaa/fmlib/fm_port_ext.h          | 18 ++--\n drivers/net/dpaa2/dpaa2_ethdev.c              | 14 +--\n drivers/net/dpaa2/dpaa2_ethdev.h              |  2 +-\n drivers/net/dpaa2/dpaa2_flow.c                | 12 +--\n drivers/net/dpaa2/dpaa2_mux.c                 |  6 +-\n drivers/net/dpaa2/dpaa2_rxtx.c                |  6 +-\n drivers/net/dpaa2/mc/dpdmux.c                 |  8 +-\n drivers/net/dpaa2/mc/fsl_dpdmux.h             |  4 +-\n drivers/net/dpaa2/mc/fsl_dpni.h               | 10 +--\n drivers/net/e1000/e1000_ethdev.h              |  4 +-\n drivers/net/e1000/em_ethdev.c                 | 10 +--\n drivers/net/e1000/em_rxtx.c                   |  6 +-\n drivers/net/e1000/igb_ethdev.c                | 18 ++--\n drivers/net/e1000/igb_flow.c                  |  4 +-\n drivers/net/e1000/igb_pf.c                    |  2 +-\n drivers/net/e1000/igb_rxtx.c                  | 14 +--\n drivers/net/ena/ena_ethdev.c                  |  2 +-\n drivers/net/ena/ena_ethdev.h                  |  2 +-\n drivers/net/enetfec/enet_regs.h               |  2 +-\n drivers/net/enic/enic_flow.c                  | 64 +++++++-------\n drivers/net/enic/enic_fm_flow.c               | 10 +--\n drivers/net/enic/enic_main.c                  |  2 +-\n drivers/net/enic/enic_rxtx.c                  |  2 +-\n drivers/net/fm10k/fm10k.h                     |  2 +-\n drivers/net/fm10k/fm10k_ethdev.c              | 12 +--\n drivers/net/fm10k/fm10k_rxtx_vec.c            | 10 +--\n drivers/net/hinic/hinic_pmd_ethdev.c          |  4 +-\n drivers/net/hinic/hinic_pmd_ethdev.h          |  2 +-\n drivers/net/hinic/hinic_pmd_flow.c            |  8 +-\n drivers/net/hinic/hinic_pmd_rx.c              | 30 +++----\n drivers/net/hinic/hinic_pmd_tx.c              |  2 +-\n drivers/net/hns3/hns3_cmd.c                   |  4 +-\n drivers/net/hns3/hns3_common.c                |  2 +-\n drivers/net/hns3/hns3_dcb.c                   | 86 +++++++++----------\n drivers/net/hns3/hns3_dcb.h                   | 24 +++---\n drivers/net/hns3/hns3_ethdev.c                | 32 +++----\n drivers/net/hns3/hns3_ethdev.h                |  8 +-\n drivers/net/hns3/hns3_ethdev_vf.c             |  6 +-\n drivers/net/hns3/hns3_fdir.c                  |  4 +-\n drivers/net/hns3/hns3_fdir.h                  |  2 +-\n drivers/net/hns3/hns3_flow.c                  | 12 +--\n drivers/net/hns3/hns3_mbx.c                   |  4 +-\n drivers/net/hns3/hns3_mbx.h                   |  2 +-\n drivers/net/hns3/hns3_rss.h                   |  2 +-\n drivers/net/hns3/hns3_rxtx.c                  | 16 ++--\n drivers/net/hns3/hns3_rxtx.h                  |  2 +-\n drivers/net/hns3/hns3_stats.c                 |  4 +-\n drivers/net/hns3/hns3_tm.c                    |  4 +-\n drivers/net/i40e/i40e_ethdev.c                | 12 +--\n drivers/net/i40e/i40e_ethdev.h                | 10 +--\n drivers/net/i40e/i40e_fdir.c                  | 10 +--\n drivers/net/i40e/i40e_flow.c                  |  2 +-\n 
drivers/net/i40e/i40e_pf.c                    |  4 +-\n drivers/net/i40e/i40e_rxtx.c                  | 20 ++---\n drivers/net/i40e/i40e_rxtx_vec_altivec.c      |  2 +-\n drivers/net/i40e/i40e_rxtx_vec_neon.c         |  4 +-\n drivers/net/i40e/i40e_rxtx_vec_sse.c          |  6 +-\n drivers/net/i40e/rte_pmd_i40e.c               |  2 +-\n drivers/net/iavf/iavf_ethdev.c                | 14 +--\n drivers/net/iavf/iavf_hash.c                  |  6 +-\n drivers/net/iavf/iavf_ipsec_crypto.c          | 18 ++--\n drivers/net/iavf/iavf_ipsec_crypto.h          |  6 +-\n drivers/net/iavf/iavf_rxtx.c                  |  4 +-\n drivers/net/iavf/iavf_rxtx_vec_sse.c          |  4 +-\n drivers/net/iavf/iavf_vchnl.c                 |  4 +-\n drivers/net/ice/ice_dcf.c                     |  2 +-\n drivers/net/ice/ice_dcf_ethdev.c              |  2 +-\n drivers/net/ice/ice_ethdev.c                  | 12 +--\n drivers/net/ice/ice_rxtx.c                    | 10 +--\n drivers/net/ice/ice_rxtx_vec_sse.c            |  4 +-\n drivers/net/ice/ice_switch_filter.c           | 26 +++---\n drivers/net/igc/igc_filter.c                  |  2 +-\n drivers/net/igc/igc_txrx.c                    |  4 +-\n drivers/net/ionic/ionic_if.h                  |  6 +-\n drivers/net/ipn3ke/ipn3ke_ethdev.c            |  2 +-\n drivers/net/ipn3ke/ipn3ke_ethdev.h            |  4 +-\n drivers/net/ipn3ke/ipn3ke_flow.c              |  2 +-\n drivers/net/ipn3ke/ipn3ke_representor.c       | 12 +--\n drivers/net/ipn3ke/ipn3ke_tm.c                |  4 +-\n drivers/net/ipn3ke/meson.build                |  2 +-\n drivers/net/ixgbe/ixgbe_bypass.c              |  2 +-\n drivers/net/ixgbe/ixgbe_bypass_api.h          |  4 +-\n drivers/net/ixgbe/ixgbe_ethdev.c              | 18 ++--\n drivers/net/ixgbe/ixgbe_ethdev.h              |  2 +-\n drivers/net/ixgbe/ixgbe_fdir.c                |  2 +-\n drivers/net/ixgbe/ixgbe_flow.c                |  4 +-\n drivers/net/ixgbe/ixgbe_ipsec.c               |  2 +-\n drivers/net/ixgbe/ixgbe_pf.c                  |  2 +-\n drivers/net/ixgbe/ixgbe_rxtx.c                | 10 +--\n drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c        |  2 +-\n drivers/net/memif/memif_socket.c              |  2 +-\n drivers/net/memif/rte_eth_memif.c             |  2 +-\n drivers/net/mlx4/mlx4.h                       |  2 +-\n drivers/net/mlx4/mlx4_ethdev.c                |  2 +-\n drivers/net/mlx5/linux/mlx5_os.c              |  8 +-\n drivers/net/mlx5/mlx5.c                       | 12 +--\n drivers/net/mlx5/mlx5.h                       | 12 +--\n drivers/net/mlx5/mlx5_flow.c                  | 28 +++---\n drivers/net/mlx5/mlx5_flow.h                  |  6 +-\n drivers/net/mlx5/mlx5_flow_aso.c              |  8 +-\n drivers/net/mlx5/mlx5_flow_dv.c               | 56 ++++++------\n drivers/net/mlx5/mlx5_flow_flex.c             |  4 +-\n drivers/net/mlx5/mlx5_flow_meter.c            | 16 ++--\n drivers/net/mlx5/mlx5_rx.c                    |  2 +-\n drivers/net/mlx5/mlx5_rxq.c                   |  4 +-\n drivers/net/mlx5/mlx5_rxtx_vec_altivec.h      |  2 +-\n drivers/net/mlx5/mlx5_rxtx_vec_neon.h         |  2 +-\n drivers/net/mlx5/mlx5_rxtx_vec_sse.h          |  2 +-\n drivers/net/mlx5/mlx5_tx.c                    |  2 +-\n drivers/net/mlx5/mlx5_utils.h                 |  2 +-\n drivers/net/mlx5/windows/mlx5_flow_os.c       |  2 +-\n drivers/net/mlx5/windows/mlx5_os.c            |  2 +-\n drivers/net/mvneta/mvneta_ethdev.c            |  2 +-\n drivers/net/mvpp2/mrvl_ethdev.c               |  2 +-\n drivers/net/mvpp2/mrvl_qos.c                  |  
4 +-\n drivers/net/netvsc/hn_nvs.c                   |  2 +-\n drivers/net/netvsc/hn_rxtx.c                  |  4 +-\n drivers/net/netvsc/hn_vf.c                    |  2 +-\n .../net/nfp/nfpcore/nfp-common/nfp_resid.h    |  6 +-\n drivers/net/nfp/nfpcore/nfp_cppcore.c         |  2 +-\n drivers/net/nfp/nfpcore/nfp_nsp.h             |  2 +-\n drivers/net/nfp/nfpcore/nfp_resource.c        |  2 +-\n drivers/net/nfp/nfpcore/nfp_rtsym.c           |  2 +-\n drivers/net/ngbe/ngbe_ethdev.c                | 14 +--\n drivers/net/ngbe/ngbe_pf.c                    |  2 +-\n drivers/net/octeontx/octeontx_ethdev.c        |  2 +-\n drivers/net/octeontx2/otx2_ethdev_irq.c       |  2 +-\n drivers/net/octeontx2/otx2_ptp.c              |  2 +-\n drivers/net/octeontx2/otx2_tx.h               |  4 +-\n drivers/net/octeontx2/otx2_vlan.c             |  2 +-\n drivers/net/octeontx_ep/otx2_ep_vf.c          |  2 +-\n drivers/net/octeontx_ep/otx_ep_vf.c           |  2 +-\n drivers/net/pfe/pfe_ethdev.c                  |  6 +-\n drivers/net/pfe/pfe_hal.c                     |  2 +-\n drivers/net/pfe/pfe_hif.c                     |  4 +-\n drivers/net/pfe/pfe_hif.h                     |  2 +-\n drivers/net/pfe/pfe_hif_lib.c                 |  8 +-\n drivers/net/qede/qede_debug.c                 | 14 +--\n drivers/net/qede/qede_ethdev.c                |  2 +-\n drivers/net/qede/qede_rxtx.c                  | 12 +--\n drivers/net/qede/qede_rxtx.h                  |  2 +-\n drivers/net/sfc/sfc.c                         |  2 +-\n drivers/net/sfc/sfc_dp.c                      |  2 +-\n drivers/net/sfc/sfc_dp_rx.h                   |  4 +-\n drivers/net/sfc/sfc_ef100.h                   |  2 +-\n drivers/net/sfc/sfc_ef100_rx.c                |  2 +-\n drivers/net/sfc/sfc_ef10_essb_rx.c            |  2 +-\n drivers/net/sfc/sfc_ef10_rx_ev.h              |  2 +-\n drivers/net/sfc/sfc_intr.c                    |  2 +-\n drivers/net/sfc/sfc_mae.c                     | 50 +++++------\n drivers/net/sfc/sfc_rx.c                      |  6 +-\n drivers/net/sfc/sfc_tso.h                     | 10 +--\n drivers/net/sfc/sfc_tx.c                      |  2 +-\n drivers/net/softnic/rte_eth_softnic_flow.c    |  2 +-\n drivers/net/tap/rte_eth_tap.c                 |  2 +-\n drivers/net/tap/tap_bpf_api.c                 |  4 +-\n drivers/net/tap/tap_flow.c                    |  4 +-\n drivers/net/thunderx/nicvf_svf.c              |  2 +-\n drivers/net/txgbe/txgbe_ethdev.c              | 14 +--\n drivers/net/txgbe/txgbe_ethdev_vf.c           |  6 +-\n drivers/net/txgbe/txgbe_ipsec.c               |  2 +-\n drivers/net/txgbe/txgbe_pf.c                  |  2 +-\n drivers/net/virtio/virtio_ethdev.c            |  4 +-\n drivers/net/virtio/virtio_pci.c               |  2 +-\n drivers/net/virtio/virtio_rxtx.c              |  2 +-\n drivers/net/virtio/virtio_rxtx_packed_avx.h   |  2 +-\n drivers/net/virtio/virtqueue.c                |  2 +-\n drivers/net/virtio/virtqueue.h                |  4 +-\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c           |  2 +-\n drivers/raw/dpaa2_qdma/dpaa2_qdma.h           |  4 +-\n drivers/raw/ifpga/ifpga_rawdev.c              | 10 +--\n drivers/raw/ioat/ioat_rawdev.c                |  2 +-\n drivers/raw/ioat/ioat_spec.h                  |  2 +-\n drivers/raw/ntb/ntb.h                         |  2 +-\n drivers/regex/mlx5/mlx5_regex_fastpath.c      |  2 +-\n drivers/vdpa/mlx5/mlx5_vdpa_mem.c             |  2 +-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c           |  2 +-\n examples/bbdev_app/main.c                     |  2 +-\n 
examples/bond/main.c                          |  4 +-\n examples/dma/dmafwd.c                         |  2 +-\n examples/ethtool/lib/rte_ethtool.c            |  2 +-\n examples/ethtool/lib/rte_ethtool.h            |  4 +-\n examples/ip_reassembly/main.c                 |  4 +-\n examples/ipsec-secgw/event_helper.c           |  2 +-\n examples/ipsec-secgw/ipsec-secgw.c            | 14 +--\n examples/ipsec-secgw/sa.c                     |  6 +-\n examples/ipsec-secgw/sp4.c                    |  2 +-\n examples/ipsec-secgw/sp6.c                    |  2 +-\n examples/ipsec-secgw/test/common_defs.sh      |  4 +-\n examples/kni/main.c                           |  2 +-\n examples/l2fwd-cat/l2fwd-cat.c                |  2 +-\n examples/l2fwd-event/l2fwd_event_generic.c    |  2 +-\n .../l2fwd-event/l2fwd_event_internal_port.c   |  2 +-\n examples/l2fwd-jobstats/main.c                |  2 +-\n examples/l3fwd-acl/main.c                     |  6 +-\n examples/l3fwd-power/main.c                   |  4 +-\n examples/l3fwd/l3fwd_common.h                 |  4 +-\n examples/l3fwd/l3fwd_neon.h                   |  2 +-\n examples/l3fwd/l3fwd_sse.h                    |  2 +-\n examples/multi_process/hotplug_mp/commands.c  |  2 +-\n examples/multi_process/simple_mp/main.c       |  2 +-\n examples/multi_process/symmetric_mp/main.c    |  2 +-\n examples/ntb/ntb_fwd.c                        |  2 +-\n examples/packet_ordering/main.c               |  2 +-\n examples/performance-thread/common/lthread.c  |  6 +-\n .../performance-thread/common/lthread_diag.c  |  2 +-\n .../performance-thread/common/lthread_int.h   |  2 +-\n .../performance-thread/common/lthread_tls.c   |  2 +-\n .../performance-thread/l3fwd-thread/main.c    | 12 +--\n .../pthread_shim/pthread_shim.h               |  2 +-\n examples/pipeline/examples/registers.spec     |  2 +-\n examples/qos_sched/cmdline.c                  |  2 +-\n examples/server_node_efd/node/node.c          |  2 +-\n examples/skeleton/basicfwd.c                  |  2 +-\n examples/vhost/main.c                         | 10 +--\n examples/vhost/virtio_net.c                   | 50 +++++------\n examples/vm_power_manager/channel_monitor.c   |  2 +-\n examples/vm_power_manager/power_manager.h     |  2 +-\n examples/vmdq/main.c                          |  2 +-\n kernel/linux/kni/kni_fifo.h                   |  2 +-\n lib/acl/acl_bld.c                             |  2 +-\n lib/acl/acl_run_altivec.h                     |  2 +-\n lib/acl/acl_run_avx512.c                      |  2 +-\n lib/acl/acl_run_avx512x16.h                   | 14 +--\n lib/acl/acl_run_avx512x8.h                    | 12 +--\n lib/bpf/bpf_convert.c                         |  4 +-\n lib/bpf/bpf_validate.c                        | 10 +--\n lib/cryptodev/rte_cryptodev.h                 |  2 +-\n lib/dmadev/rte_dmadev.h                       |  4 +-\n lib/eal/arm/include/rte_cycles_32.h           |  2 +-\n lib/eal/common/eal_common_trace_ctf.c         |  8 +-\n lib/eal/freebsd/eal_interrupts.c              |  4 +-\n lib/eal/include/generic/rte_pflock.h          |  2 +-\n lib/eal/include/rte_malloc.h                  |  4 +-\n lib/eal/linux/eal_interrupts.c                |  4 +-\n lib/eal/linux/eal_vfio.h                      |  2 +-\n lib/eal/windows/eal_windows.h                 |  2 +-\n lib/eal/windows/include/dirent.h              |  4 +-\n lib/eal/windows/include/fnmatch.h             |  4 +-\n lib/eal/x86/include/rte_atomic.h              |  2 +-\n lib/eventdev/rte_event_eth_rx_adapter.c       |  6 +-\n 
lib/fib/rte_fib.c                             |  4 +-\n lib/fib/rte_fib.h                             |  4 +-\n lib/fib/rte_fib6.c                            |  4 +-\n lib/fib/rte_fib6.h                            |  4 +-\n lib/graph/graph_populate.c                    |  4 +-\n lib/hash/rte_crc_arm64.h                      |  2 +-\n lib/hash/rte_thash.c                          |  2 +-\n lib/ip_frag/ip_frag_internal.c                |  2 +-\n lib/ipsec/ipsec_sad.c                         | 10 +--\n lib/ipsec/ipsec_telemetry.c                   |  2 +-\n lib/ipsec/rte_ipsec_sad.h                     |  2 +-\n lib/ipsec/sa.c                                |  2 +-\n lib/mbuf/rte_mbuf_core.h                      |  2 +-\n lib/meson.build                               |  2 +-\n lib/net/rte_l2tpv2.h                          |  4 +-\n lib/pipeline/rte_swx_ctl.h                    |  4 +-\n lib/pipeline/rte_swx_pipeline_internal.h      |  4 +-\n lib/pipeline/rte_swx_pipeline_spec.c          |  2 +-\n lib/power/power_cppc_cpufreq.c                |  2 +-\n lib/regexdev/rte_regexdev.h                   |  6 +-\n lib/ring/rte_ring_core.h                      |  2 +-\n lib/sched/rte_pie.h                           |  6 +-\n lib/sched/rte_red.h                           |  4 +-\n lib/sched/rte_sched.c                         |  2 +-\n lib/sched/rte_sched.h                         |  2 +-\n lib/table/rte_swx_table.h                     |  2 +-\n lib/table/rte_swx_table_selector.h            |  2 +-\n lib/telemetry/telemetry.c                     |  2 +-\n lib/telemetry/telemetry_json.h                |  2 +-\n lib/vhost/vhost_user.c                        |  4 +-\n lib/vhost/virtio_net.c                        | 10 +--\n 483 files changed, 1328 insertions(+), 1328 deletions(-)",
    "diff": "diff --git a/app/proc-info/main.c b/app/proc-info/main.c\nindex ce140aaf..56070a33 100644\n--- a/app/proc-info/main.c\n+++ b/app/proc-info/main.c\n@@ -630,7 +630,7 @@ metrics_display(int port_id)\n \n \tnames =  rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);\n \tif (names == NULL) {\n-\t\tprintf(\"Cannot allocate memory for metrcis names\\n\");\n+\t\tprintf(\"Cannot allocate memory for metrics names\\n\");\n \t\trte_free(metrics);\n \t\treturn;\n \t}\n@@ -1109,7 +1109,7 @@ show_tm(void)\n \t\t\t\tcaplevel.n_nodes_max,\n \t\t\t\tcaplevel.n_nodes_nonleaf_max,\n \t\t\t\tcaplevel.n_nodes_leaf_max);\n-\t\t\tprintf(\"\\t  -- indetical: non leaf %u leaf %u\\n\",\n+\t\t\tprintf(\"\\t  -- identical: non leaf %u leaf %u\\n\",\n \t\t\t\tcaplevel.non_leaf_nodes_identical,\n \t\t\t\tcaplevel.leaf_nodes_identical);\n \n@@ -1263,7 +1263,7 @@ show_ring(char *name)\n \t\t\tprintf(\"  - Name (%s) on socket (%d)\\n\"\n \t\t\t\t\"  - flags:\\n\"\n \t\t\t\t\"\\t  -- Single Producer Enqueue (%u)\\n\"\n-\t\t\t\t\"\\t  -- Single Consmer Dequeue (%u)\\n\",\n+\t\t\t\t\"\\t  -- Single Consumer Dequeue (%u)\\n\",\n \t\t\t\tptr->name,\n \t\t\t\tptr->memzone->socket_id,\n \t\t\t\tptr->flags & RING_F_SP_ENQ,\ndiff --git a/app/test-acl/main.c b/app/test-acl/main.c\nindex c2de1877..aa508f5d 100644\n--- a/app/test-acl/main.c\n+++ b/app/test-acl/main.c\n@@ -386,8 +386,8 @@ parse_cb_ipv4_trace(char *str, struct ipv4_5tuple *v)\n }\n \n /*\n- * Parses IPV6 address, exepcts the following format:\n- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).\n+ * Parses IPV6 address, expects the following format:\n+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexadecimal digit).\n  */\n static int\n parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],\n@@ -994,7 +994,7 @@ print_usage(const char *prgname)\n \t\t\t\"should be either 1 or multiple of %zu, \"\n \t\t\t\"but not greater then %u]\\n\"\n \t\t\"[--\" OPT_MAX_SIZE\n-\t\t\t\"=<size limit (in bytes) for runtime ACL strucutures> \"\n+\t\t\t\"=<size limit (in bytes) for runtime ACL structures> \"\n \t\t\t\"leave 0 for default behaviour]\\n\"\n \t\t\"[--\" OPT_ITER_NUM \"=<number of iterations to perform>]\\n\"\n \t\t\"[--\" OPT_VERBOSE \"=<verbose level>]\\n\"\ndiff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c\nindex da55b02b..1d8e5fe6 100644\n--- a/app/test-compress-perf/comp_perf_test_cyclecount.c\n+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c\n@@ -180,7 +180,7 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)\n \n \tif (ops == NULL) {\n \t\tRTE_LOG(ERR, USER1,\n-\t\t\t\"Can't allocate memory for ops strucures\\n\");\n+\t\t\t\"Can't allocate memory for ops structures\\n\");\n \t\treturn -1;\n \t}\n \ndiff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c\nindex d3dff070..4569599e 100644\n--- a/app/test-compress-perf/comp_perf_test_throughput.c\n+++ b/app/test-compress-perf/comp_perf_test_throughput.c\n@@ -72,7 +72,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type)\n \n \tif (ops == NULL) {\n \t\tRTE_LOG(ERR, USER1,\n-\t\t\t\"Can't allocate memory for ops strucures\\n\");\n+\t\t\t\"Can't allocate memory for ops structures\\n\");\n \t\treturn -1;\n \t}\n \ndiff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c\nindex f6e21368..7d060294 100644\n--- 
a/app/test-compress-perf/comp_perf_test_verify.c\n+++ b/app/test-compress-perf/comp_perf_test_verify.c\n@@ -75,7 +75,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)\n \n \tif (ops == NULL) {\n \t\tRTE_LOG(ERR, USER1,\n-\t\t\t\"Can't allocate memory for ops strucures\\n\");\n+\t\t\t\"Can't allocate memory for ops structures\\n\");\n \t\treturn -1;\n \t}\n \ndiff --git a/app/test-compress-perf/main.c b/app/test-compress-perf/main.c\nindex cc9951a9..6ff6a2f0 100644\n--- a/app/test-compress-perf/main.c\n+++ b/app/test-compress-perf/main.c\n@@ -67,7 +67,7 @@ comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)\n \n \tuint64_t comp_flags = cap->comp_feature_flags;\n \n-\t/* Huffman enconding */\n+\t/* Huffman encoding */\n \tif (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&\n \t\t\t(comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {\n \t\tRTE_LOG(ERR, USER1,\ndiff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c\nindex ba1f104f..5842f29d 100644\n--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c\n+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c\n@@ -334,7 +334,7 @@ pmd_cyclecount_bench_burst_sz(\n \t * queue, so we never get any failed enqs unless the driver won't accept\n \t * the exact number of descriptors we requested, or the driver won't\n \t * wrap around the end of the TX ring. However, since we're only\n-\t * dequeueing once we've filled up the queue, we have to benchmark it\n+\t * dequeuing once we've filled up the queue, we have to benchmark it\n \t * piecemeal and then average out the results.\n \t */\n \tcur_op = 0;\ndiff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h\nindex 70f2839c..4390c570 100644\n--- a/app/test-crypto-perf/cperf_test_vectors.h\n+++ b/app/test-crypto-perf/cperf_test_vectors.h\n@@ -2,8 +2,8 @@\n  * Copyright(c) 2016-2017 Intel Corporation\n  */\n \n-#ifndef _CPERF_TEST_VECTRORS_\n-#define _CPERF_TEST_VECTRORS_\n+#ifndef _CPERF_TEST_VECTORS_\n+#define _CPERF_TEST_VECTORS_\n \n #include \"cperf_options.h\"\n \ndiff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c\nindex 753a7dbd..4ae44801 100644\n--- a/app/test-eventdev/evt_options.c\n+++ b/app/test-eventdev/evt_options.c\n@@ -336,7 +336,7 @@ usage(char *program)\n \t\t\"\\t--deq_tmo_nsec     : global dequeue timeout\\n\"\n \t\t\"\\t--prod_type_ethdev : use ethernet device as producer.\\n\"\n \t\t\"\\t--prod_type_timerdev : use event timer device as producer.\\n\"\n-\t\t\"\\t                     expity_nsec would be the timeout\\n\"\n+\t\t\"\\t                     expiry_nsec would be the timeout\\n\"\n \t\t\"\\t                     in ns.\\n\"\n \t\t\"\\t--prod_type_timerdev_burst : use timer device as producer\\n\"\n \t\t\"\\t                             burst mode.\\n\"\ndiff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c\nindex ff7813f9..603e7c91 100644\n--- a/app/test-eventdev/test_order_common.c\n+++ b/app/test-eventdev/test_order_common.c\n@@ -253,7 +253,7 @@ void\n order_opt_dump(struct evt_options *opt)\n {\n \tevt_dump_producer_lcores(opt);\n-\tevt_dump(\"nb_wrker_lcores\", \"%d\", evt_nr_active_lcores(opt->wlcores));\n+\tevt_dump(\"nb_worker_lcores\", \"%d\", evt_nr_active_lcores(opt->wlcores));\n \tevt_dump_worker_lcores(opt);\n \tevt_dump(\"nb_evdev_ports\", \"%d\", order_nb_event_ports(opt));\n }\ndiff --git a/app/test-fib/main.c b/app/test-fib/main.c\nindex 
ecd42011..622703dc 100644\n--- a/app/test-fib/main.c\n+++ b/app/test-fib/main.c\n@@ -624,7 +624,7 @@ print_usage(void)\n \t\t\"(if -f is not specified)>]\\n\"\n \t\t\"[-r <percentage ratio of random ip's to lookup\"\n \t\t\"(if -t is not specified)>]\\n\"\n-\t\t\"[-c <do comarison with LPM library>]\\n\"\n+\t\t\"[-c <do comparison with LPM library>]\\n\"\n \t\t\"[-6 <do tests with ipv6 (default ipv4)>]\\n\"\n \t\t\"[-s <shuffle randomly generated routes>]\\n\"\n \t\t\"[-a <check nexthops for all ipv4 address space\"\n@@ -641,7 +641,7 @@ print_usage(void)\n \t\t\"[-g <number of tbl8's for dir24_8 or trie FIBs>]\\n\"\n \t\t\"[-w <path to the file to dump routing table>]\\n\"\n \t\t\"[-u <path to the file to dump ip's for lookup>]\\n\"\n-\t\t\"[-v <type of loookup function:\"\n+\t\t\"[-v <type of lookup function:\"\n \t\t\"\\ts1, s2, s3 (3 types of scalar), v (vector) -\"\n \t\t\" for DIR24_8 based FIB\\n\"\n \t\t\"\\ts, v - for TRIE based ipv6 FIB>]\\n\",\ndiff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h\nindex 0db2254b..29b63298 100644\n--- a/app/test-flow-perf/config.h\n+++ b/app/test-flow-perf/config.h\n@@ -28,7 +28,7 @@\n #define PORT_ID_DST 1\n #define TEID_VALUE 1\n \n-/* Flow items/acctions max size */\n+/* Flow items/actions max size */\n #define MAX_ITEMS_NUM 32\n #define MAX_ACTIONS_NUM 32\n #define MAX_ATTRS_NUM 16\ndiff --git a/app/test-flow-perf/main.c b/app/test-flow-perf/main.c\nindex 11f1ee0e..56d43734 100644\n--- a/app/test-flow-perf/main.c\n+++ b/app/test-flow-perf/main.c\n@@ -1519,7 +1519,7 @@ dump_used_cpu_time(const char *item,\n \t * threads time.\n \t *\n \t * Throughput: total count of rte rules divided\n-\t * over the average of the time cosumed by all\n+\t * over the average of the time consumed by all\n \t * threads time.\n \t */\n \tdouble insertion_latency_time;\ndiff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c\nindex 6e10afee..e626b1c7 100644\n--- a/app/test-pmd/cmdline.c\n+++ b/app/test-pmd/cmdline.c\n@@ -561,7 +561,7 @@ static void cmd_help_long_parsed(void *parsed_result,\n \t\t\t\"    Set the option to enable display of RX and TX bursts.\\n\"\n \n \t\t\t\"set port (port_id) vf (vf_id) rx|tx on|off\\n\"\n-\t\t\t\"    Enable/Disable a VF receive/tranmit from a port\\n\\n\"\n+\t\t\t\"    Enable/Disable a VF receive/transmit from a port\\n\\n\"\n \n \t\t\t\"set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM\"\n \t\t\t\"|MPE) (on|off)\\n\"\ndiff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c\nindex bbe3dc01..5c2bba48 100644\n--- a/app/test-pmd/cmdline_flow.c\n+++ b/app/test-pmd/cmdline_flow.c\n@@ -2162,7 +2162,7 @@ static const struct token token_list[] = {\n \t},\n \t[COMMON_POLICY_ID] = {\n \t\t.name = \"{policy_id}\",\n-\t\t.type = \"POLCIY_ID\",\n+\t\t.type = \"POLICY_ID\",\n \t\t.help = \"policy id\",\n \t\t.call = parse_int,\n \t\t.comp = comp_none,\n@@ -2370,7 +2370,7 @@ static const struct token token_list[] = {\n \t},\n \t[TUNNEL_DESTROY] = {\n \t\t.name = \"destroy\",\n-\t\t.help = \"destroy tunel\",\n+\t\t.help = \"destroy tunnel\",\n \t\t.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),\n \t\t\t     NEXT_ENTRY(COMMON_PORT_ID)),\n \t\t.args = ARGS(ARGS_ENTRY(struct buffer, port)),\n@@ -2378,7 +2378,7 @@ static const struct token token_list[] = {\n \t},\n \t[TUNNEL_DESTROY_ID] = {\n \t\t.name = \"id\",\n-\t\t.help = \"tunnel identifier to testroy\",\n+\t\t.help = \"tunnel identifier to destroy\",\n \t\t.next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)),\n \t\t.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),\n 
\t\t.call = parse_tunnel,\ndiff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c\nindex bfbd43ca..281e4124 100644\n--- a/app/test-pmd/cmdline_tm.c\n+++ b/app/test-pmd/cmdline_tm.c\n@@ -69,7 +69,7 @@ print_err_msg(struct rte_tm_error *error)\n \t\t[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]\n \t\t\t= \"num shared shapers field (node params)\",\n \t\t[RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]\n-\t\t\t= \"wfq weght mode field (node params)\",\n+\t\t\t= \"wfq weight mode field (node params)\",\n \t\t[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]\n \t\t\t= \"num strict priorities field (node params)\",\n \t\t[RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]\n@@ -479,7 +479,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,\n cmdline_parse_inst_t cmd_show_port_tm_level_cap = {\n \t.f = cmd_show_port_tm_level_cap_parsed,\n \t.data = NULL,\n-\t.help_str = \"Show Port TM Hierarhical level Capabilities\",\n+\t.help_str = \"Show Port TM Hierarchical level Capabilities\",\n \t.tokens = {\n \t\t(void *)&cmd_show_port_tm_level_cap_show,\n \t\t(void *)&cmd_show_port_tm_level_cap_port,\ndiff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c\nindex 2aeea243..0177284d 100644\n--- a/app/test-pmd/csumonly.c\n+++ b/app/test-pmd/csumonly.c\n@@ -796,7 +796,7 @@ pkt_copy_split(const struct rte_mbuf *pkt)\n  *\n  * The testpmd command line for this forward engine sets the flags\n  * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control\n- * wether a checksum must be calculated in software or in hardware. The\n+ * whether a checksum must be calculated in software or in hardware. The\n  * IP, UDP, TCP and SCTP flags always concern the inner layer. The\n  * OUTER_IP is only useful for tunnel packets.\n  */\ndiff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c\nindex f9185065..daf6a31b 100644\n--- a/app/test-pmd/parameters.c\n+++ b/app/test-pmd/parameters.c\n@@ -110,7 +110,7 @@ usage(char* progname)\n \t       \"If the drop-queue doesn't exist, the packet is dropped. 
\"\n \t       \"By default drop-queue=127.\\n\");\n #ifdef RTE_LIB_LATENCYSTATS\n-\tprintf(\"  --latencystats=N: enable latency and jitter statistcs \"\n+\tprintf(\"  --latencystats=N: enable latency and jitter statistics \"\n \t       \"monitoring on forwarding lcore id N.\\n\");\n #endif\n \tprintf(\"  --disable-crc-strip: disable CRC stripping by hardware.\\n\");\ndiff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c\nindex 55eb293c..6c387bde 100644\n--- a/app/test-pmd/testpmd.c\n+++ b/app/test-pmd/testpmd.c\n@@ -449,7 +449,7 @@ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;\n uint8_t latencystats_enabled;\n \n /*\n- * Lcore ID to serive latency statistics.\n+ * Lcore ID to service latency statistics.\n  */\n lcoreid_t latencystats_lcore_id = -1;\n \ndiff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c\nindex b8497e73..47db8271 100644\n--- a/app/test-pmd/txonly.c\n+++ b/app/test-pmd/txonly.c\n@@ -174,14 +174,14 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)\n \t\t\t\t\tsizeof(struct rte_ether_hdr) +\n \t\t\t\t\tsizeof(struct rte_ipv4_hdr) +\n \t\t\t\t\tsizeof(struct rte_udp_hdr)));\n-\t/* updata udp pkt length */\n+\t/* update udp pkt length */\n \tudp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,\n \t\t\t\tsizeof(struct rte_ether_hdr) +\n \t\t\t\tsizeof(struct rte_ipv4_hdr));\n \tpkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));\n \tudp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);\n \n-\t/* updata ip pkt length and csum */\n+\t/* update ip pkt length and csum */\n \tip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,\n \t\t\t\tsizeof(struct rte_ether_hdr));\n \tip_hdr->hdr_checksum = 0;\ndiff --git a/app/test/test_barrier.c b/app/test/test_barrier.c\nindex 6d6d4874..ec69af25 100644\n--- a/app/test/test_barrier.c\n+++ b/app/test/test_barrier.c\n@@ -11,7 +11,7 @@\n   * (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)\n   * for two execution units to make sure that rte_smp_mb() prevents\n   * store-load reordering to happen.\n-  * Also when executed on a single lcore could be used as a approxiamate\n+  * Also when executed on a single lcore could be used as a approximate\n   * estimation of number of cycles particular implementation of rte_smp_mb()\n   * will take.\n   */\ndiff --git a/app/test/test_bpf.c b/app/test/test_bpf.c\nindex 46bcb51f..2d755a87 100644\n--- a/app/test/test_bpf.c\n+++ b/app/test/test_bpf.c\n@@ -23,7 +23,7 @@\n /*\n  * Basic functional tests for librte_bpf.\n  * The main procedure - load eBPF program, execute it and\n- * compare restuls with expected values.\n+ * compare results with expected values.\n  */\n \n struct dummy_offset {\n@@ -2707,7 +2707,7 @@ test_ld_mbuf1_check(uint64_t rc, const void *arg)\n }\n \n /*\n- * same as ld_mbuf1, but then trancate the mbuf by 1B,\n+ * same as ld_mbuf1, but then truncate the mbuf by 1B,\n  * so load of last 4B fail.\n  */\n static void\ndiff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c\nindex c63b5b67..57c566aa 100644\n--- a/app/test/test_compressdev.c\n+++ b/app/test/test_compressdev.c\n@@ -1256,7 +1256,7 @@ test_deflate_comp_run(const struct interim_data_params *int_data,\n \t\t/*\n \t\t * Store original operation index in private data,\n \t\t * since ordering does not have to be maintained,\n-\t\t * when dequeueing from compressdev, so a comparison\n+\t\t * when dequeuing from compressdev, so a comparison\n \t\t * at the end of the test can be done.\n \t\t */\n \t\tpriv_data = (struct priv_op_data *) (ops[i] + 1);\ndiff 
--git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c\nindex 10b48cda..6c949605 100644\n--- a/app/test/test_cryptodev.c\n+++ b/app/test/test_cryptodev.c\n@@ -6870,7 +6870,7 @@ test_snow3g_decryption_with_digest_test_case_1(void)\n \t}\n \n \t/*\n-\t * Function prepare data for hash veryfication test case.\n+\t * Function prepare data for hash verification test case.\n \t * Digest is allocated in 4 last bytes in plaintext, pattern.\n \t */\n \tsnow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);\ndiff --git a/app/test/test_fib_perf.c b/app/test/test_fib_perf.c\nindex 86b2f832..7a25fe8d 100644\n--- a/app/test/test_fib_perf.c\n+++ b/app/test/test_fib_perf.c\n@@ -346,7 +346,7 @@ test_fib_perf(void)\n \tfib = rte_fib_create(__func__, SOCKET_ID_ANY, &config);\n \tTEST_FIB_ASSERT(fib != NULL);\n \n-\t/* Measue add. */\n+\t/* Measure add. */\n \tbegin = rte_rdtsc();\n \n \tfor (i = 0; i < NUM_ROUTE_ENTRIES; i++) {\ndiff --git a/app/test/test_kni.c b/app/test/test_kni.c\nindex 40ab0d5c..2761de9b 100644\n--- a/app/test/test_kni.c\n+++ b/app/test/test_kni.c\n@@ -326,7 +326,7 @@ test_kni_register_handler_mp(void)\n \n \t\t/* Check with the invalid parameters */\n \t\tif (rte_kni_register_handlers(kni, NULL) == 0) {\n-\t\t\tprintf(\"Unexpectedly register successuflly \"\n+\t\t\tprintf(\"Unexpectedly register successfully \"\n \t\t\t\t\t\"with NULL ops pointer\\n\");\n \t\t\texit(-1);\n \t\t}\n@@ -475,7 +475,7 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)\n \n \t/**\n \t * Check multiple processes support on\n-\t * registerring/unregisterring handlers.\n+\t * registering/unregistering handlers.\n \t */\n \tif (test_kni_register_handler_mp() < 0) {\n \t\tprintf(\"fail to check multiple process support\\n\");\ndiff --git a/app/test/test_kvargs.c b/app/test/test_kvargs.c\nindex a91ea8dc..b7b97a0d 100644\n--- a/app/test/test_kvargs.c\n+++ b/app/test/test_kvargs.c\n@@ -11,7 +11,7 @@\n \n #include \"test.h\"\n \n-/* incrementd in handler, to check it is properly called once per\n+/* incremented in handler, to check it is properly called once per\n  * key/value association */\n static unsigned count;\n \n@@ -107,14 +107,14 @@ static int test_valid_kvargs(void)\n \t\tgoto fail;\n \t}\n \tcount = 0;\n-\t/* call check_handler() for all entries with key=\"unexistant_key\" */\n-\tif (rte_kvargs_process(kvlist, \"unexistant_key\", check_handler, NULL) < 0) {\n+\t/* call check_handler() for all entries with key=\"nonexistent_key\" */\n+\tif (rte_kvargs_process(kvlist, \"nonexistent_key\", check_handler, NULL) < 0) {\n \t\tprintf(\"rte_kvargs_process() error\\n\");\n \t\trte_kvargs_free(kvlist);\n \t\tgoto fail;\n \t}\n \tif (count != 0) {\n-\t\tprintf(\"invalid count value %d after rte_kvargs_process(unexistant_key)\\n\",\n+\t\tprintf(\"invalid count value %d after rte_kvargs_process(nonexistent_key)\\n\",\n \t\t\tcount);\n \t\trte_kvargs_free(kvlist);\n \t\tgoto fail;\n@@ -135,10 +135,10 @@ static int test_valid_kvargs(void)\n \t\trte_kvargs_free(kvlist);\n \t\tgoto fail;\n \t}\n-\t/* count all entries with key=\"unexistant_key\" */\n-\tcount = rte_kvargs_count(kvlist, \"unexistant_key\");\n+\t/* count all entries with key=\"nonexistent_key\" */\n+\tcount = rte_kvargs_count(kvlist, \"nonexistent_key\");\n \tif (count != 0) {\n-\t\tprintf(\"invalid count value %d after rte_kvargs_count(unexistant_key)\\n\",\n+\t\tprintf(\"invalid count value %d after rte_kvargs_count(nonexistent_key)\\n\",\n \t\t\tcount);\n \t\trte_kvargs_free(kvlist);\n \t\tgoto fail;\n@@ -156,7 +156,7 @@ 
static int test_valid_kvargs(void)\n \t/* call check_handler() on all entries with key=\"check\", it\n \t * should fail as the value is not recognized by the handler */\n \tif (rte_kvargs_process(kvlist, \"check\", check_handler, NULL) == 0) {\n-\t\tprintf(\"rte_kvargs_process() is success bu should not\\n\");\n+\t\tprintf(\"rte_kvargs_process() succeeded but should not have\\n\");\n \t\trte_kvargs_free(kvlist);\n \t\tgoto fail;\n \t}\ndiff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c\nindex dc6fc46b..80ea1cdc 100644\n--- a/app/test/test_link_bonding.c\n+++ b/app/test/test_link_bonding.c\n@@ -2026,7 +2026,7 @@ uint8_t polling_slave_mac[] = {0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00 };\n int polling_test_slaves[TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT] = { -1, -1 };\n \n static int\n-test_roundrobin_verfiy_polling_slave_link_status_change(void)\n+test_roundrobin_verify_polling_slave_link_status_change(void)\n {\n \tstruct rte_ether_addr *mac_addr =\n \t\t(struct rte_ether_addr *)polling_slave_mac;\n@@ -5118,7 +5118,7 @@ static struct unit_test_suite link_bonding_test_suite  = {\n \t\tTEST_CASE(test_roundrobin_verify_promiscuous_enable_disable),\n \t\tTEST_CASE(test_roundrobin_verify_mac_assignment),\n \t\tTEST_CASE(test_roundrobin_verify_slave_link_status_change_behaviour),\n-\t\tTEST_CASE(test_roundrobin_verfiy_polling_slave_link_status_change),\n+\t\tTEST_CASE(test_roundrobin_verify_polling_slave_link_status_change),\n \t\tTEST_CASE(test_activebackup_tx_burst),\n \t\tTEST_CASE(test_activebackup_rx_burst),\n \t\tTEST_CASE(test_activebackup_verify_promiscuous_enable_disable),\ndiff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c\nindex 351129de..aea76e70 100644\n--- a/app/test/test_link_bonding_mode4.c\n+++ b/app/test/test_link_bonding_mode4.c\n@@ -58,11 +58,11 @@ static const struct rte_ether_addr slave_mac_default = {\n \t{ 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 }\n };\n \n-static const struct rte_ether_addr parnter_mac_default = {\n+static const struct rte_ether_addr partner_mac_default = {\n \t{ 0x22, 0xBB, 0xFF, 0xBB, 0x00, 0x00 }\n };\n \n-static const struct rte_ether_addr parnter_system = {\n+static const struct rte_ether_addr partner_system = {\n \t{ 0x33, 0xFF, 0xBB, 0xFF, 0x00, 0x00 }\n };\n \n@@ -76,7 +76,7 @@ struct slave_conf {\n \tuint16_t port_id;\n \tuint8_t bonded : 1;\n \n-\tuint8_t lacp_parnter_state;\n+\tuint8_t lacp_partner_state;\n };\n \n struct ether_vlan_hdr {\n@@ -258,7 +258,7 @@ add_slave(struct slave_conf *slave, uint8_t start)\n \tTEST_ASSERT_EQUAL(rte_is_same_ether_addr(&addr, &addr_check), 1,\n \t\t\t\"Slave MAC address is not as expected\");\n \n-\tRTE_VERIFY(slave->lacp_parnter_state == 0);\n+\tRTE_VERIFY(slave->lacp_partner_state == 0);\n \treturn 0;\n }\n \n@@ -288,7 +288,7 @@ remove_slave(struct slave_conf *slave)\n \t\t\ttest_params.bonded_port_id);\n \n \tslave->bonded = 0;\n-\tslave->lacp_parnter_state = 0;\n+\tslave->lacp_partner_state = 0;\n \treturn 0;\n }\n \n@@ -501,20 +501,20 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt)\n \tslow_hdr = rte_pktmbuf_mtod(pkt, struct slow_protocol_frame *);\n \n \t/* Change source address to partner address */\n-\trte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.src_addr);\n+\trte_ether_addr_copy(&partner_mac_default, &slow_hdr->eth_hdr.src_addr);\n \tslow_hdr->eth_hdr.src_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n \t\tslave->port_id;\n \n \tlacp = (struct lacpdu *) &slow_hdr->slow_protocol;\n \t/* Save last received state 
*/\n-\tslave->lacp_parnter_state = lacp->actor.state;\n+\tslave->lacp_partner_state = lacp->actor.state;\n \t/* Change it into LACP replay by matching parameters. */\n \tmemcpy(&lacp->partner.port_params, &lacp->actor.port_params,\n \t\tsizeof(struct port_params));\n \n \tlacp->partner.state = lacp->actor.state;\n \n-\trte_ether_addr_copy(&parnter_system, &lacp->actor.port_params.system);\n+\trte_ether_addr_copy(&partner_system, &lacp->actor.port_params.system);\n \tlacp->actor.state = STATE_LACP_ACTIVE |\n \t\t\t\t\t\tSTATE_SYNCHRONIZATION |\n \t\t\t\t\t\tSTATE_AGGREGATION |\n@@ -580,7 +580,7 @@ bond_handshake_done(struct slave_conf *slave)\n \tconst uint8_t expected_state = STATE_LACP_ACTIVE | STATE_SYNCHRONIZATION |\n \t\t\tSTATE_AGGREGATION | STATE_COLLECTING | STATE_DISTRIBUTING;\n \n-\treturn slave->lacp_parnter_state == expected_state;\n+\treturn slave->lacp_partner_state == expected_state;\n }\n \n static unsigned\n@@ -1134,7 +1134,7 @@ test_mode4_tx_burst(void)\n \n \t\tif (slave_down_id == slave->port_id) {\n \t\t\tTEST_ASSERT_EQUAL(normal_cnt + slow_cnt, 0,\n-\t\t\t\t\"slave %u enexpectedly transmitted %u packets\",\n+\t\t\t\t\"slave %u unexpectedly transmitted %u packets\",\n \t\t\t\tnormal_cnt + slow_cnt, slave->port_id);\n \t\t} else {\n \t\t\tTEST_ASSERT_EQUAL(slow_cnt, 0,\n@@ -1165,7 +1165,7 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)\n \t\t\t&marker_hdr->eth_hdr.dst_addr);\n \n \t/* Init source address */\n-\trte_ether_addr_copy(&parnter_mac_default,\n+\trte_ether_addr_copy(&partner_mac_default,\n \t\t\t&marker_hdr->eth_hdr.src_addr);\n \tmarker_hdr->eth_hdr.src_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n \t\tslave->port_id;\n@@ -1353,7 +1353,7 @@ test_mode4_expired(void)\n \t/* After test only expected slave should be in EXPIRED state */\n \tFOR_EACH_SLAVE(i, slave) {\n \t\tif (slave == exp_slave)\n-\t\t\tTEST_ASSERT(slave->lacp_parnter_state & STATE_EXPIRED,\n+\t\t\tTEST_ASSERT(slave->lacp_partner_state & STATE_EXPIRED,\n \t\t\t\t\"Slave %u should be in expired.\", slave->port_id);\n \t\telse\n \t\t\tTEST_ASSERT_EQUAL(bond_handshake_done(slave), 1,\n@@ -1392,7 +1392,7 @@ test_mode4_ext_ctrl(void)\n \t\t},\n \t};\n \n-\trte_ether_addr_copy(&parnter_system, &src_mac);\n+\trte_ether_addr_copy(&partner_system, &src_mac);\n \trte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);\n \n \tinitialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,\n@@ -1446,7 +1446,7 @@ test_mode4_ext_lacp(void)\n \t\t},\n \t};\n \n-\trte_ether_addr_copy(&parnter_system, &src_mac);\n+\trte_ether_addr_copy(&partner_system, &src_mac);\n \trte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);\n \n \tinitialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,\n@@ -1535,7 +1535,7 @@ check_environment(void)\n \t\tif (port->bonded != 0)\n \t\t\tenv_state |= 0x04;\n \n-\t\tif (port->lacp_parnter_state != 0)\n+\t\tif (port->lacp_partner_state != 0)\n \t\t\tenv_state |= 0x08;\n \n \t\tif (env_state != 0)\ndiff --git a/app/test/test_lpm6_data.h b/app/test/test_lpm6_data.h\nindex c3894f73..da9b161f 100644\n--- a/app/test/test_lpm6_data.h\n+++ b/app/test/test_lpm6_data.h\n@@ -22,7 +22,7 @@ struct ips_tbl_entry {\n  * in previous test_lpm6_routes.h . 
Because this table has only 1000\n  * lines, keeping it doesn't make LPM6 test case so large and also\n  * make the algorithm to generate rule table unnecessary and the\n- * algorithm to genertate test input IPv6 and associated expected\n+ * algorithm to generate test input IPv6 and associated expected\n  * next_hop much simple.\n  */\n \ndiff --git a/app/test/test_member.c b/app/test/test_member.c\nindex 40aa4c86..af9d5091 100644\n--- a/app/test/test_member.c\n+++ b/app/test/test_member.c\n@@ -459,7 +459,7 @@ static int test_member_multimatch(void)\n \t\t\t\t\t\tMAX_MATCH, set_ids_cache);\n \t\t/*\n \t\t * For cache mode, keys overwrite when signature same.\n-\t\t * the mutimatch should work like single match.\n+\t\t * the multimatch should work like single match.\n \t\t */\n \t\tTEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&\n \t\t\t\tret_cache == 1,\ndiff --git a/app/test/test_mempool.c b/app/test/test_mempool.c\nindex f6c650d1..8e493eda 100644\n--- a/app/test/test_mempool.c\n+++ b/app/test/test_mempool.c\n@@ -304,7 +304,7 @@ static int test_mempool_single_consumer(void)\n }\n \n /*\n- * test function for mempool test based on singple consumer and single producer,\n+ * test function for mempool test based on single consumer and single producer,\n  * can run on one lcore only\n  */\n static int\n@@ -322,7 +322,7 @@ my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)\n }\n \n /*\n- * it tests the mempool operations based on singple producer and single consumer\n+ * it tests the mempool operations based on single producer and single consumer\n  */\n static int\n test_mempool_sp_sc(void)\ndiff --git a/app/test/test_memzone.c b/app/test/test_memzone.c\nindex 6ddd0fba..c9255e57 100644\n--- a/app/test/test_memzone.c\n+++ b/app/test/test_memzone.c\n@@ -543,7 +543,7 @@ test_memzone_reserve_max(void)\n \t\t}\n \n \t\tif (mz->len != maxlen) {\n-\t\t\tprintf(\"Memzone reserve with 0 size did not return bigest block\\n\");\n+\t\t\tprintf(\"Memzone reserve with 0 size did not return biggest block\\n\");\n \t\t\tprintf(\"Expected size = %zu, actual size = %zu\\n\",\n \t\t\t\t\tmaxlen, mz->len);\n \t\t\trte_dump_physmem_layout(stdout);\n@@ -606,7 +606,7 @@ test_memzone_reserve_max_aligned(void)\n \n \t\tif (mz->len < minlen || mz->len > maxlen) {\n \t\t\tprintf(\"Memzone reserve with 0 size and alignment %u did not return\"\n-\t\t\t\t\t\" bigest block\\n\", align);\n+\t\t\t\t\t\" biggest block\\n\", align);\n \t\t\tprintf(\"Expected size = %zu-%zu, actual size = %zu\\n\",\n \t\t\t\t\tminlen, maxlen, mz->len);\n \t\t\trte_dump_physmem_layout(stdout);\n@@ -1054,7 +1054,7 @@ test_memzone_basic(void)\n \tif (mz != memzone1)\n \t\treturn -1;\n \n-\tprintf(\"test duplcate zone name\\n\");\n+\tprintf(\"test duplicate zone name\\n\");\n \tmz = rte_memzone_reserve(TEST_MEMZONE_NAME(\"testzone1\"), 100,\n \t\t\tSOCKET_ID_ANY, 0);\n \tif (mz != NULL)\ndiff --git a/app/test/test_metrics.c b/app/test/test_metrics.c\nindex e736019a..11222133 100644\n--- a/app/test/test_metrics.c\n+++ b/app/test/test_metrics.c\n@@ -121,7 +121,7 @@ test_metrics_update_value(void)\n \terr = rte_metrics_update_value(RTE_METRICS_GLOBAL, KEY, VALUE);\n \tTEST_ASSERT(err >= 0, \"%s, %d\", __func__, __LINE__);\n \n-\t/* Successful Test: Valid port_id otherthan RTE_METRICS_GLOBAL, key\n+\t/* Successful Test: Valid port_id other than RTE_METRICS_GLOBAL, key\n \t * and value\n \t */\n \terr = rte_metrics_update_value(9, KEY, VALUE);\ndiff --git a/app/test/test_pcapng.c b/app/test/test_pcapng.c\nindex c2dbeaf6..34c5e123 
100644\n--- a/app/test/test_pcapng.c\n+++ b/app/test/test_pcapng.c\n@@ -109,7 +109,7 @@ test_setup(void)\n \t\treturn -1;\n \t}\n \n-\t/* Make a pool for cloned packeets */\n+\t/* Make a pool for cloned packets */\n \tmp = rte_pktmbuf_pool_create_by_ops(\"pcapng_test_pool\", NUM_PACKETS,\n \t\t\t\t\t    0, 0,\n \t\t\t\t\t    rte_pcapng_mbuf_size(pkt_len),\ndiff --git a/app/test/test_power_cpufreq.c b/app/test/test_power_cpufreq.c\nindex 1a954952..4d013cd7 100644\n--- a/app/test/test_power_cpufreq.c\n+++ b/app/test/test_power_cpufreq.c\n@@ -659,7 +659,7 @@ test_power_cpufreq(void)\n \t/* test of exit power management for an invalid lcore */\n \tret = rte_power_exit(TEST_POWER_LCORE_INVALID);\n \tif (ret == 0) {\n-\t\tprintf(\"Unpectedly exit power management successfully for \"\n+\t\tprintf(\"Unexpectedly exit power management successfully for \"\n \t\t\t\t\"lcore %u\\n\", TEST_POWER_LCORE_INVALID);\n \t\trte_power_unset_env();\n \t\treturn -1;\ndiff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c\nindex ab37a068..70404e89 100644\n--- a/app/test/test_rcu_qsbr.c\n+++ b/app/test/test_rcu_qsbr.c\n@@ -408,7 +408,7 @@ test_rcu_qsbr_synchronize_reader(void *arg)\n \n /*\n  * rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered\n- * the queiscent state.\n+ * the quiescent state.\n  */\n static int\n test_rcu_qsbr_synchronize(void)\n@@ -443,7 +443,7 @@ test_rcu_qsbr_synchronize(void)\n \trte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);\n \trte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);\n \n-\t/* Test if the API returns after unregisterng all the threads */\n+\t/* Test if the API returns after unregistering all the threads */\n \tfor (i = 0; i < RTE_MAX_LCORE; i++)\n \t\trte_rcu_qsbr_thread_unregister(t[0], i);\n \trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\ndiff --git a/app/test/test_red.c b/app/test/test_red.c\nindex 05936cfe..33a9f4eb 100644\n--- a/app/test/test_red.c\n+++ b/app/test/test_red.c\n@@ -1566,10 +1566,10 @@ static void ovfl_check_avg(uint32_t avg)\n }\n \n static struct test_config ovfl_test1_config = {\n-\t.ifname = \"queue avergage overflow test interface\",\n+\t.ifname = \"queue average overflow test interface\",\n \t.msg = \"overflow test 1 : use one RED configuration,\\n\"\n \t\"\t\t  increase average queue size to target level,\\n\"\n-\t\"\t\t  check maximum number of bits requirte_red to represent avg_s\\n\\n\",\n+\t\"\t\t  check maximum number of bits required to represent avg_s\\n\\n\",\n \t.htxt = \"avg queue size  \"\n \t\"wq_log2  \"\n \t\"fraction bits  \"\n@@ -1757,12 +1757,12 @@ test_invalid_parameters(void)\n \t\tprintf(\"%i: rte_red_config_init should have failed!\\n\", __LINE__);\n \t\treturn -1;\n \t}\n-\t/* min_treshold == max_treshold */\n+\t/* min_threshold == max_threshold */\n \tif (rte_red_config_init(&config, 0, 1, 1, 0) == 0) {\n \t\tprintf(\"%i: rte_red_config_init should have failed!\\n\", __LINE__);\n \t\treturn -1;\n \t}\n-\t/* min_treshold > max_treshold */\n+\t/* min_threshold > max_threshold */\n \tif (rte_red_config_init(&config, 0, 2, 1, 0) == 0) {\n \t\tprintf(\"%i: rte_red_config_init should have failed!\\n\", __LINE__);\n \t\treturn -1;\ndiff --git a/app/test/test_security.c b/app/test/test_security.c\nindex 060cf1ff..059731b6 100644\n--- a/app/test/test_security.c\n+++ b/app/test/test_security.c\n@@ -237,7 +237,7 @@\n  * increases .called counter. 
Function returns value stored in .ret field\n  * of the structure.\n  * In case of some parameters in some functions the expected value is unknown\n- * and cannot be detrmined prior to call. Such parameters are stored\n+ * and cannot be determined prior to call. Such parameters are stored\n  * in structure and can be compared or analyzed later in test case code.\n  *\n  * Below structures and functions follow the rules just described.\ndiff --git a/app/test/test_table.h b/app/test/test_table.h\nindex 209bdbff..003088f2 100644\n--- a/app/test/test_table.h\n+++ b/app/test/test_table.h\n@@ -25,7 +25,7 @@\n #define MAX_BULK 32\n #define N 65536\n #define TIME_S 5\n-#define TEST_RING_FULL_EMTPY_ITER   8\n+#define TEST_RING_FULL_EMPTY_ITER   8\n #define N_PORTS             2\n #define N_PKTS              2\n #define N_PKTS_EXT          6\ndiff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c\nindex aabf4375..915c451f 100644\n--- a/app/test/test_table_pipeline.c\n+++ b/app/test/test_table_pipeline.c\n@@ -364,7 +364,7 @@ setup_pipeline(int test_type)\n \t\t\t\t.action = RTE_PIPELINE_ACTION_PORT,\n \t\t\t\t{.port_id = port_out_id[i^1]},\n \t\t\t};\n-\t\t\tprintf(\"Setting secont table to output to port\\n\");\n+\t\t\tprintf(\"Setting second table to output to port\\n\");\n \n \t\t\t/* Add the default action for the table. */\n \t\t\tret = rte_pipeline_table_default_entry_add(p,\ndiff --git a/app/test/test_thash.c b/app/test/test_thash.c\nindex a6253067..62ba4a95 100644\n--- a/app/test/test_thash.c\n+++ b/app/test/test_thash.c\n@@ -684,7 +684,7 @@ test_predictable_rss_multirange(void)\n \n \t/*\n \t * calculate hashes, complements, then adjust keys with\n-\t * complements and recalsulate hashes\n+\t * complements and recalculate hashes\n \t */\n \tfor (i = 0; i < RTE_DIM(rng_arr); i++) {\n \t\tfor (k = 0; k < 100; k++) {\ndiff --git a/buildtools/binutils-avx512-check.py b/buildtools/binutils-avx512-check.py\nindex a4e14f35..57392ecd 100644\n--- a/buildtools/binutils-avx512-check.py\n+++ b/buildtools/binutils-avx512-check.py\n@@ -1,5 +1,5 @@\n #! /usr/bin/env python3\n-# SPDX-License-Identitifer: BSD-3-Clause\n+# SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2020 Intel Corporation\n \n import subprocess\ndiff --git a/devtools/check-symbol-change.sh b/devtools/check-symbol-change.sh\nindex 8fcd0ce1..8992214a 100755\n--- a/devtools/check-symbol-change.sh\n+++ b/devtools/check-symbol-change.sh\n@@ -25,7 +25,7 @@ build_map_changes()\n \n \t\t# Triggering this rule, which starts a line and ends it\n \t\t# with a { identifies a versioned section.  
The section name is\n-\t\t# the rest of the line with the + and { symbols remvoed.\n+\t\t# the rest of the line with the + and { symbols removed.\n \t\t# Triggering this rule sets in_sec to 1, which actives the\n \t\t# symbol rule below\n \t\t/^.*{/ {\n@@ -35,7 +35,7 @@ build_map_changes()\n \t\t\t}\n \t\t}\n \n-\t\t# This rule idenfies the end of a section, and disables the\n+\t\t# This rule identifies the end of a section, and disables the\n \t\t# symbol rule\n \t\t/.*}/ {in_sec=0}\n \n@@ -100,7 +100,7 @@ check_for_rule_violations()\n \t\t\t\t# Just inform the user of this occurrence, but\n \t\t\t\t# don't flag it as an error\n \t\t\t\techo -n \"INFO: symbol $symname is added but \"\n-\t\t\t\techo -n \"patch has insuficient context \"\n+\t\t\t\techo -n \"patch has insufficient context \"\n \t\t\t\techo -n \"to determine the section name \"\n \t\t\t\techo -n \"please ensure the version is \"\n \t\t\t\techo \"EXPERIMENTAL\"\ndiff --git a/doc/guides/howto/img/virtio_user_for_container_networking.svg b/doc/guides/howto/img/virtio_user_for_container_networking.svg\nindex de808066..dc9b318e 100644\n--- a/doc/guides/howto/img/virtio_user_for_container_networking.svg\n+++ b/doc/guides/howto/img/virtio_user_for_container_networking.svg\n@@ -465,7 +465,7 @@\n        v:mID=\"63\"\n        id=\"shape63-63\"><title\n          id=\"title149\">Sheet.63</title><desc\n-         id=\"desc151\">Contanier/App</desc><v:textBlock\n+         id=\"desc151\">Container/App</desc><v:textBlock\n          v:margins=\"rect(4,4,4,4)\" /><v:textRect\n          height=\"22.5\"\n          width=\"90\"\ndiff --git a/doc/guides/nics/af_packet.rst b/doc/guides/nics/af_packet.rst\nindex 82923691..66b977e1 100644\n--- a/doc/guides/nics/af_packet.rst\n+++ b/doc/guides/nics/af_packet.rst\n@@ -9,7 +9,7 @@ packets. This Linux-specific PMD binds to an AF_PACKET socket and allows\n a DPDK application to send and receive raw packets through the Kernel.\n \n In order to improve Rx and Tx performance this implementation makes use of\n-PACKET_MMAP, which provides a mmap'ed ring buffer, shared between user space\n+PACKET_MMAP, which provides a mmapped ring buffer, shared between user space\n and kernel, that's used to send and receive packets. This helps reducing system\n calls and the copies needed between user space and Kernel.\n \ndiff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst\nindex a25add7c..c8110573 100644\n--- a/doc/guides/nics/mlx4.rst\n+++ b/doc/guides/nics/mlx4.rst\n@@ -178,7 +178,7 @@ DPDK and must be installed separately:\n \n   - mlx4_core: hardware driver managing Mellanox ConnectX-3 devices.\n   - mlx4_en: Ethernet device driver that provides kernel network interfaces.\n-  - mlx4_ib: InifiniBand device driver.\n+  - mlx4_ib: InfiniBand device driver.\n   - ib_uverbs: user space driver for verbs (entry point for libibverbs).\n \n - **Firmware update**\ndiff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\nindex feb2e57c..daa7f2af 100644\n--- a/doc/guides/nics/mlx5.rst\n+++ b/doc/guides/nics/mlx5.rst\n@@ -649,7 +649,7 @@ Driver options\n \n   A timeout value is set in the driver to control the waiting time before\n   dropping a packet. Once the timer is expired, the delay drop will be\n-  deactivated for all the Rx queues with this feature enable. To re-activeate\n+  deactivated for all the Rx queues with this feature enable. 
To re-activate\n   it, a rearming is needed and it is part of the kernel driver starting from\n   OFED 5.5.\n \n@@ -1033,7 +1033,7 @@ Driver options\n \n   For the MARK action the last 16 values in the full range are reserved for\n   internal PMD purposes (to emulate FLAG action). The valid range for the\n-  MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF\n+  MARK action values is 0-0xFFEF for the 16-bit mode and 0-0xFFFFEF\n   for the 24-bit mode, the flows with the MARK action value outside\n   the specified range will be rejected.\n \n@@ -1317,7 +1317,7 @@ DPDK and must be installed separately:\n   - mlx5_core: hardware driver managing Mellanox\n     ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel\n     network devices.\n-  - mlx5_ib: InifiniBand device driver.\n+  - mlx5_ib: InfiniBand device driver.\n   - ib_uverbs: user space driver for Verbs (entry point for libibverbs).\n \n - **Firmware update**\ndiff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst\nindex 0af35f5e..8766bc34 100644\n--- a/doc/guides/prog_guide/cryptodev_lib.rst\n+++ b/doc/guides/prog_guide/cryptodev_lib.rst\n@@ -751,7 +751,7 @@ feature is useful when the user wants to abandon partially enqueued operations\n for a failed enqueue burst operation and try enqueuing in a whole later.\n \n Similar as enqueue, there are two dequeue functions:\n-``rte_cryptodev_raw_dequeue`` for dequeing single operation, and\n+``rte_cryptodev_raw_dequeue`` for dequeuing single operation, and\n ``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.\n all operations in a ``struct rte_crypto_sym_vec`` descriptor). The\n ``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback\ndiff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst\nindex 29f6fefc..c6accce7 100644\n--- a/doc/guides/prog_guide/env_abstraction_layer.rst\n+++ b/doc/guides/prog_guide/env_abstraction_layer.rst\n@@ -433,7 +433,7 @@ and decides on a preferred IOVA mode.\n \n - if all buses report RTE_IOVA_PA, then the preferred IOVA mode is RTE_IOVA_PA,\n - if all buses report RTE_IOVA_VA, then the preferred IOVA mode is RTE_IOVA_VA,\n-- if all buses report RTE_IOVA_DC, no bus expressed a preferrence, then the\n+- if all buses report RTE_IOVA_DC, no bus expressed a preference, then the\n   preferred mode is RTE_IOVA_DC,\n - if the buses disagree (at least one wants RTE_IOVA_PA and at least one wants\n   RTE_IOVA_VA), then the preferred IOVA mode is RTE_IOVA_DC (see below with the\n@@ -658,7 +658,7 @@ Known Issues\n + rte_ring\n \n   rte_ring supports multi-producer enqueue and multi-consumer dequeue.\n-  However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptable.\n+  However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptible.\n \n   .. 
note::\n \ndiff --git a/doc/guides/prog_guide/img/turbo_tb_decode.svg b/doc/guides/prog_guide/img/turbo_tb_decode.svg\nindex a259f458..95779c36 100644\n--- a/doc/guides/prog_guide/img/turbo_tb_decode.svg\n+++ b/doc/guides/prog_guide/img/turbo_tb_decode.svg\n@@ -460,7 +460,7 @@\n            height=\"14.642858\"\n            x=\"39.285713\"\n            y=\"287.16254\" /></flowRegion><flowPara\n-         id=\"flowPara4817\">offse</flowPara></flowRoot>    <text\n+         id=\"flowPara4817\">offset</flowPara></flowRoot>    <text\n        xml:space=\"preserve\"\n        style=\"font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144\"\n        x=\"74.16684\"\ndiff --git a/doc/guides/prog_guide/img/turbo_tb_encode.svg b/doc/guides/prog_guide/img/turbo_tb_encode.svg\nindex e3708a93..98a6b839 100644\n--- a/doc/guides/prog_guide/img/turbo_tb_encode.svg\n+++ b/doc/guides/prog_guide/img/turbo_tb_encode.svg\n@@ -649,7 +649,7 @@\n            height=\"14.642858\"\n            x=\"39.285713\"\n            y=\"287.16254\" /></flowRegion><flowPara\n-         id=\"flowPara4817\">offse</flowPara></flowRoot>    <text\n+         id=\"flowPara4817\">offset</flowPara></flowRoot>    <text\n        xml:space=\"preserve\"\n        style=\"font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144\"\n        x=\"16.351753\"\ndiff --git a/doc/guides/prog_guide/qos_framework.rst b/doc/guides/prog_guide/qos_framework.rst\nindex 89ea1995..22616117 100644\n--- a/doc/guides/prog_guide/qos_framework.rst\n+++ b/doc/guides/prog_guide/qos_framework.rst\n@@ -1196,12 +1196,12 @@ In the case of severe congestion, the dropper resorts to tail drop.\n This occurs when a packet queue has reached maximum capacity and cannot store any more packets.\n In this situation, all arriving packets are dropped.\n \n-The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.\n+The flow through the dropper is illustrated in :numref:`figure_flow_tru_dropper`.\n The RED/WRED/PIE algorithm is exercised first and tail drop second.\n \n-.. _figure_flow_tru_droppper:\n+.. _figure_flow_tru_dropper:\n \n-.. figure:: img/flow_tru_droppper.*\n+.. 
figure:: img/flow_tru_dropper.*\n\n    Flow Through the Dropper\n\ndiff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst\nindex c51ed88c..b4aa9c47 100644\n--- a/doc/guides/prog_guide/rte_flow.rst\n+++ b/doc/guides/prog_guide/rte_flow.rst\n@@ -1379,7 +1379,7 @@ Matches a network service header (RFC 8300).\n - ``ttl``: maximum SFF hopes (6 bits).\n - ``length``: total length in 4 bytes words (6 bits).\n - ``reserved1``: reserved1 bits (4 bits).\n-- ``mdtype``: ndicates format of NSH header (4 bits).\n+- ``mdtype``: indicates format of NSH header (4 bits).\n - ``next_proto``: indicates protocol type of encap data (8 bits).\n - ``spi``: service path identifier (3 bytes).\n - ``sindex``: service index (1 byte).\ndiff --git a/doc/guides/rawdevs/cnxk_bphy.rst b/doc/guides/rawdevs/cnxk_bphy.rst\nindex 3cb21756..522390bf 100644\n--- a/doc/guides/rawdevs/cnxk_bphy.rst\n+++ b/doc/guides/rawdevs/cnxk_bphy.rst\n@@ -37,7 +37,7 @@ using ``rte_rawdev_queue_conf_get()``.\n \n To perform data transfer use standard ``rte_rawdev_enqueue_buffers()`` and\n ``rte_rawdev_dequeue_buffers()`` APIs. Not all messages produce sensible\n-responses hence dequeueing is not always necessary.\n+responses hence dequeuing is not always necessary.\n \n BPHY CGX/RPM PMD\n ----------------\ndiff --git a/doc/guides/regexdevs/features_overview.rst b/doc/guides/regexdevs/features_overview.rst\nindex c512bde5..3e7ab409 100644\n--- a/doc/guides/regexdevs/features_overview.rst\n+++ b/doc/guides/regexdevs/features_overview.rst\n@@ -22,7 +22,7 @@ PCRE back tracking ctrl\n   Support PCRE back tracking ctrl.\n \n PCRE call outs\n-  Support PCRE call outes.\n+  Support PCRE call outs.\n \n PCRE forward reference\n   Support Forward reference.\ndiff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst\nindex 5be2d171..c4f2f712 100644\n--- a/doc/guides/rel_notes/release_16_07.rst\n+++ b/doc/guides/rel_notes/release_16_07.rst\n@@ -192,7 +192,7 @@ EAL\n \n * **igb_uio: Fixed possible mmap failure for Linux >= 4.5.**\n \n-  The mmaping of the iomem range of the PCI device fails for kernels that\n+  The mmapping of the iomem range of the PCI device fails for kernels that\n  enabled the ``CONFIG_IO_STRICT_DEVMEM`` option. 
The error seen by the\n   user is as similar to the following::\n \ndiff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst\nindex 25439dad..1fd17558 100644\n--- a/doc/guides/rel_notes/release_17_08.rst\n+++ b/doc/guides/rel_notes/release_17_08.rst\n@@ -232,7 +232,7 @@ API Changes\n   * The ``rte_cryptodev_configure()`` function does not create the session\n     mempool for the device anymore.\n   * The ``rte_cryptodev_queue_pair_attach_sym_session()`` and\n-    ``rte_cryptodev_queue_pair_dettach_sym_session()`` functions require\n+    ``rte_cryptodev_queue_pair_detach_sym_session()`` functions require\n     the new parameter ``device id``.\n   * Parameters of ``rte_cryptodev_sym_session_create()`` were modified to\n     accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.\ndiff --git a/doc/guides/rel_notes/release_2_1.rst b/doc/guides/rel_notes/release_2_1.rst\nindex 35e6c888..d0ad99eb 100644\n--- a/doc/guides/rel_notes/release_2_1.rst\n+++ b/doc/guides/rel_notes/release_2_1.rst\n@@ -671,7 +671,7 @@ Resolved Issues\n   value 0.\n \n \n-  Fixes: 40b966a211ab (\"ivshmem: library changes for mmaping using ivshmem\")\n+  Fixes: 40b966a211ab (\"ivshmem: library changes for mmapping using ivshmem\")\n \n \n * **ixgbe/base: Fix SFP probing.**\ndiff --git a/doc/guides/sample_app_ug/ip_reassembly.rst b/doc/guides/sample_app_ug/ip_reassembly.rst\nindex 06289c22..279bbca3 100644\n--- a/doc/guides/sample_app_ug/ip_reassembly.rst\n+++ b/doc/guides/sample_app_ug/ip_reassembly.rst\n@@ -154,7 +154,7 @@ each RX queue uses its own mempool.\n \n .. literalinclude:: ../../../examples/ip_reassembly/main.c\n     :language: c\n-    :start-after: mbufs stored int the gragment table. 8<\n+    :start-after: mbufs stored int the fragment table. 8<\n     :end-before: >8 End of mbufs stored int the fragmentation table.\n     :dedent: 1\n \ndiff --git a/doc/guides/sample_app_ug/l2_forward_cat.rst b/doc/guides/sample_app_ug/l2_forward_cat.rst\nindex 440642ef..3ada3575 100644\n--- a/doc/guides/sample_app_ug/l2_forward_cat.rst\n+++ b/doc/guides/sample_app_ug/l2_forward_cat.rst\n@@ -176,7 +176,7 @@ function. The value returned is the number of parsed arguments:\n .. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c\n     :language: c\n     :start-after: Initialize the Environment Abstraction Layer (EAL). 8<\n-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).\n+    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).\n     :dedent: 1\n \n The next task is to initialize the PQoS library and configure CAT. The\ndiff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst\nindex 605eb09a..c6cbc3de 100644\n--- a/doc/guides/sample_app_ug/server_node_efd.rst\n+++ b/doc/guides/sample_app_ug/server_node_efd.rst\n@@ -191,7 +191,7 @@ flow is not handled by the node.\n .. literalinclude:: ../../../examples/server_node_efd/node/node.c\n     :language: c\n     :start-after: Packets dequeued from the shared ring. 
8<\n-    :end-before: >8 End of packets dequeueing.\n+    :end-before: >8 End of packets dequeuing.\n \n Finally, note that both processes updates statistics, such as transmitted, received\n and dropped packets, which are shown and refreshed by the server app.\ndiff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst\nindex 6d0de644..08ddd7aa 100644\n--- a/doc/guides/sample_app_ug/skeleton.rst\n+++ b/doc/guides/sample_app_ug/skeleton.rst\n@@ -54,7 +54,7 @@ function. The value returned is the number of parsed arguments:\n .. literalinclude:: ../../../examples/skeleton/basicfwd.c\n     :language: c\n     :start-after: Initializion the Environment Abstraction Layer (EAL). 8<\n-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).\n+    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).\n     :dedent: 1\n \n \ndiff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst\nindex 7160b6a6..9ce87956 100644\n--- a/doc/guides/sample_app_ug/vm_power_management.rst\n+++ b/doc/guides/sample_app_ug/vm_power_management.rst\n@@ -681,7 +681,7 @@ The following is an example JSON string for a power management request.\n    \"resource_id\": 10\n    }}\n \n-To query the available frequences of an lcore, use the query_cpu_freq command.\n+To query the available frequencies of an lcore, use the query_cpu_freq command.\n Where {core_num} is the lcore to query.\n Before using this command, please enable responses via the set_query command on the host.\n \ndiff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst\nindex 44228cd7..94792d88 100644\n--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst\n+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst\n@@ -3510,7 +3510,7 @@ Tunnel offload\n Indicate tunnel offload rule type\n \n - ``tunnel_set {tunnel_id}``: mark rule as tunnel offload decap_set type.\n-- ``tunnel_match {tunnel_id}``:  mark rule as tunel offload match type.\n+- ``tunnel_match {tunnel_id}``:  mark rule as tunnel offload match type.\n \n Matching pattern\n ^^^^^^^^^^^^^^^^\ndiff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c\nindex 1c6080f2..662c98be 100644\n--- a/drivers/baseband/acc100/rte_acc100_pmd.c\n+++ b/drivers/baseband/acc100/rte_acc100_pmd.c\n@@ -4343,7 +4343,7 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n \t\tfor (template_idx = ACC100_SIG_UL_5G;\n \t\t\t\ttemplate_idx <= ACC100_SIG_UL_5G_LAST;\n \t\t\t\ttemplate_idx++) {\n-\t\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\t\tif (template_idx == failed_engine)\n \t\t\t\tacc100_reg_write(d, address, value);\n@@ -4392,7 +4392,7 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n \t\taddress = HwPfFecUl5gIbDebugReg +\n \t\t\t\tACC100_ENGINE_OFFSET * template_idx;\n \t\tstatus = (acc100_reg_read(d, address) >> 4) & 0xF;\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tif (status == 1) {\n \t\t\tacc100_reg_write(d, address, value);\n@@ -4470,7 +4470,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tacc100_reg_write(d, address, value);\n \n \t/* Set default descriptor signature */\n-\taddress = HWPfDmaDescriptorSignatuture;\n+\taddress = HWPfDmaDescriptorSignature;\n 
\tvalue = 0;\n \tacc100_reg_write(d, address, value);\n \n@@ -4522,19 +4522,19 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t/* Template Priority in incremental order */\n \tfor (template_idx = 0; template_idx < ACC100_NUM_TMPL;\n \t\t\ttemplate_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg0Indx +\n+\t\taddress = HWPfQmgrGrpTemplateReg0Indx +\n \t\tACC100_BYTES_IN_WORD * (template_idx % 8);\n \t\tvalue = ACC100_TMPL_PRI_0;\n \t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg1Indx +\n+\t\taddress = HWPfQmgrGrpTemplateReg1Indx +\n \t\tACC100_BYTES_IN_WORD * (template_idx % 8);\n \t\tvalue = ACC100_TMPL_PRI_1;\n \t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg2indx +\n+\t\taddress = HWPfQmgrGrpTemplateReg2indx +\n \t\tACC100_BYTES_IN_WORD * (template_idx % 8);\n \t\tvalue = ACC100_TMPL_PRI_2;\n \t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg3Indx +\n+\t\taddress = HWPfQmgrGrpTemplateReg3Indx +\n \t\tACC100_BYTES_IN_WORD * (template_idx % 8);\n \t\tvalue = ACC100_TMPL_PRI_3;\n \t\tacc100_reg_write(d, address, value);\n@@ -4548,7 +4548,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (template_idx = 0; template_idx < ACC100_NUM_TMPL;\n \t\t\ttemplate_idx++) {\n \t\tvalue = 0;\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tacc100_reg_write(d, address, value);\n \t}\n@@ -4561,7 +4561,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (template_idx = ACC100_SIG_UL_4G;\n \t\t\ttemplate_idx <= ACC100_SIG_UL_4G_LAST;\n \t\t\ttemplate_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tacc100_reg_write(d, address, value);\n \t}\n@@ -4579,7 +4579,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\taddress = HwPfFecUl5gIbDebugReg +\n \t\t\t\tACC100_ENGINE_OFFSET * template_idx;\n \t\tstatus = (acc100_reg_read(d, address) >> 4) & 0xF;\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tif (status == 1) {\n \t\t\tacc100_reg_write(d, address, value);\n@@ -4600,7 +4600,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (template_idx = ACC100_SIG_DL_4G;\n \t\t\ttemplate_idx <= ACC100_SIG_DL_4G_LAST;\n \t\t\ttemplate_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tacc100_reg_write(d, address, value);\n #if RTE_ACC100_SINGLE_FEC == 1\n@@ -4616,7 +4616,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (template_idx = ACC100_SIG_DL_5G;\n \t\t\ttemplate_idx <= ACC100_SIG_DL_5G_LAST;\n \t\t\ttemplate_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n+\t\taddress = HWPfQmgrGrpTemplateReg4Indx\n \t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n \t\tacc100_reg_write(d, address, value);\n #if RTE_ACC100_SINGLE_FEC == 1\ndiff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\nindex 92decc3e..21d35292 100644\n--- a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\n+++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\n@@ -2097,7 +2097,7 @@ dequeue_enc_one_op_cb(struct fpga_queue *q, struct 
rte_bbdev_enc_op **op,\n \trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n \n \t*op = desc->enc_req.op_addr;\n-\t/* Check the decriptor error field, return 1 on error */\n+\t/* Check the descriptor error field, return 1 on error */\n \tdesc_error = check_desc_error(desc->enc_req.error);\n \t(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;\n \n@@ -2139,7 +2139,7 @@ dequeue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,\n \tfor (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {\n \t\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset +\n \t\t\t\tcb_idx) & q->sw_ring_wrap_mask);\n-\t\t/* Check the decriptor error field, return 1 on error */\n+\t\t/* Check the descriptor error field, return 1 on error */\n \t\tdesc_error = check_desc_error(desc->enc_req.error);\n \t\tstatus |=  desc_error << RTE_BBDEV_DATA_ERROR;\n \t\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n@@ -2177,7 +2177,7 @@ dequeue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,\n \t(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;\n \t/* crc_pass = 0 when decoder fails */\n \t(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;\n-\t/* Check the decriptor error field, return 1 on error */\n+\t/* Check the descriptor error field, return 1 on error */\n \tdesc_error = check_desc_error(desc->enc_req.error);\n \t(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;\n \treturn 1;\n@@ -2221,7 +2221,7 @@ dequeue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,\n \t\titer_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);\n \t\t/* crc_pass = 0 when decoder fails, one fails all */\n \t\tstatus |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;\n-\t\t/* Check the decriptor error field, return 1 on error */\n+\t\t/* Check the descriptor error field, return 1 on error */\n \t\tdesc_error = check_desc_error(desc->enc_req.error);\n \t\tstatus |= desc_error << RTE_BBDEV_DATA_ERROR;\n \t\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\ndiff --git a/drivers/baseband/null/bbdev_null.c b/drivers/baseband/null/bbdev_null.c\nindex 753d920e..08cff582 100644\n--- a/drivers/baseband/null/bbdev_null.c\n+++ b/drivers/baseband/null/bbdev_null.c\n@@ -31,7 +31,7 @@ struct bbdev_null_params {\n \tuint16_t queues_num;  /*< Null BBDEV queues number */\n };\n \n-/* Accecptable params for null BBDEV devices */\n+/* Acceptable params for null BBDEV devices */\n #define BBDEV_NULL_MAX_NB_QUEUES_ARG  \"max_nb_queues\"\n #define BBDEV_NULL_SOCKET_ID_ARG      \"socket_id\"\n \ndiff --git a/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c\nindex b234bb75..c6b1eb86 100644\n--- a/drivers/baseband/turbo_sw/bbdev_turbo_software.c\n+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c\n@@ -61,7 +61,7 @@ struct turbo_sw_params {\n \tuint16_t queues_num;  /*< Turbo SW device queues number */\n };\n \n-/* Accecptable params for Turbo SW devices */\n+/* Acceptable params for Turbo SW devices */\n #define TURBO_SW_MAX_NB_QUEUES_ARG  \"max_nb_queues\"\n #define TURBO_SW_SOCKET_ID_ARG      \"socket_id\"\n \ndiff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c\nindex 737ac8d8..5546a9cb 100644\n--- a/drivers/bus/dpaa/dpaa_bus.c\n+++ b/drivers/bus/dpaa/dpaa_bus.c\n@@ -70,7 +70,7 @@ compare_dpaa_devices(struct rte_dpaa_device *dev1,\n {\n \tint comp = 0;\n \n-\t/* Segragating ETH from SEC devices */\n+\t/* Segregating ETH from SEC devices */\n \tif (dev1->device_type > dev2->device_type)\n \t\tcomp = 1;\n 
\telse if (dev1->device_type < dev2->device_type)\ndiff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h\nindex 7ef2f3b2..9b63e559 100644\n--- a/drivers/bus/dpaa/include/fsl_qman.h\n+++ b/drivers/bus/dpaa/include/fsl_qman.h\n@@ -1353,7 +1353,7 @@ __rte_internal\n int qman_irqsource_add(u32 bits);\n \n /**\n- * qman_fq_portal_irqsource_add - samilar to qman_irqsource_add, but it\n+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it\n  * takes portal (fq specific) as input rather than using the thread affined\n  * portal.\n  */\n@@ -1416,7 +1416,7 @@ __rte_internal\n struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);\n \n /**\n- * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue\n+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue\n  * @fq: Frame Queue on which the volatile dequeue command is issued\n  * @dq: DQRR entry to consume. This is the one which is provided by the\n  *    'qbman_dequeue' command.\n@@ -2017,7 +2017,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,\n  * @cgr: the 'cgr' object to deregister\n  *\n  * \"Unplugs\" this CGR object from the portal affine to the cpu on which this API\n- * is executed. This must be excuted on the same affine portal on which it was\n+ * is executed. This must be executed on the same affine portal on which it was\n  * created.\n  */\n __rte_internal\ndiff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h\nindex dcf35e4a..97279421 100644\n--- a/drivers/bus/dpaa/include/fsl_usd.h\n+++ b/drivers/bus/dpaa/include/fsl_usd.h\n@@ -40,7 +40,7 @@ struct dpaa_raw_portal {\n \t/* Specifies the stash request queue this portal should use */\n \tuint8_t sdest;\n \n-\t/* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX\n+\t/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX\n \t * for don't care.  The portal index will be populated by the\n \t * driver when the ioctl() successfully completes.\n \t */\ndiff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h\nindex a9229886..48d6b569 100644\n--- a/drivers/bus/dpaa/include/process.h\n+++ b/drivers/bus/dpaa/include/process.h\n@@ -49,7 +49,7 @@ struct dpaa_portal_map {\n struct dpaa_ioctl_portal_map {\n \t/* Input parameter, is a qman or bman portal required. 
*/\n \tenum dpaa_portal_type type;\n-\t/* Specifes a specific portal index to map or 0xffffffff\n+\t/* Specifies a specific portal index to map or 0xffffffff\n \t * for don't care.\n \t */\n \tuint32_t index;\ndiff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c\nindex a0ef24cd..53fd7553 100644\n--- a/drivers/bus/fslmc/fslmc_bus.c\n+++ b/drivers/bus/fslmc/fslmc_bus.c\n@@ -539,7 +539,7 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)\n \n \tfslmc_bus = driver->fslmc_bus;\n \n-\t/* Cleanup the PA->VA Translation table; From whereever this function\n+\t/* Cleanup the PA->VA Translation table; From wherever this function\n \t * is called from.\n \t */\n \tif (rte_eal_iova_mode() == RTE_IOVA_PA)\ndiff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h\nindex 133606a9..2394445b 100644\n--- a/drivers/bus/fslmc/fslmc_vfio.h\n+++ b/drivers/bus/fslmc/fslmc_vfio.h\n@@ -56,7 +56,7 @@ int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,\n int fslmc_vfio_setup_group(void);\n int fslmc_vfio_process_group(void);\n char *fslmc_get_container(void);\n-int fslmc_get_container_group(int *gropuid);\n+int fslmc_get_container_group(int *groupid);\n int rte_fslmc_vfio_dmamap(void);\n int rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size);\n \ndiff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c\nindex 2210a0fa..52605ea2 100644\n--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c\n+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c\n@@ -178,7 +178,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)\n \tdpio_epoll_fd = epoll_create(1);\n \tret = rte_dpaa2_intr_enable(dpio_dev->intr_handle, 0);\n \tif (ret) {\n-\t\tDPAA2_BUS_ERR(\"Interrupt registeration failed\");\n+\t\tDPAA2_BUS_ERR(\"Interrupt registration failed\");\n \t\treturn -1;\n \t}\n \ndiff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\nindex b1bba1ac..957fc62d 100644\n--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n@@ -156,7 +156,7 @@ struct dpaa2_queue {\n \t\tstruct rte_cryptodev_data *crypto_data;\n \t};\n \tuint32_t fqid;\t\t/*!< Unique ID of this queue */\n-\tuint16_t flow_id;\t/*!< To be used by DPAA2 frmework */\n+\tuint16_t flow_id;\t/*!< To be used by DPAA2 framework */\n \tuint8_t tc_index;\t/*!< traffic class identifier */\n \tuint8_t cgid;\t\t/*! 
< Congestion Group id for this queue */\n \tuint64_t rx_pkts;\ndiff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h\nindex eb68c9ca..5375ea38 100644\n--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h\n+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h\n@@ -510,7 +510,7 @@ int qbman_result_has_new_result(struct qbman_swp *s,\n \t\t\t\tstruct qbman_result *dq);\n \n /**\n- * qbman_check_command_complete() - Check if the previous issued dq commnd\n+ * qbman_check_command_complete() - Check if the previously issued dq command\n  * is completed and results are available in memory.\n  * @s: the software portal object.\n  * @dq: the dequeue result read from the memory.\n@@ -687,7 +687,7 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);\n \n /**\n  * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response\n- * odpid is valid only if ODPVAILD flag is TRUE.\n+ * odpid is valid only if ODPVALID flag is TRUE.\n  * @dq: the dequeue result.\n  *\n  * Return odpid.\n@@ -743,7 +743,7 @@ const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);\n  * qbman_result_SCN_state() - Get the state field in State-change notification\n  * @scn: the state change notification.\n  *\n- * Return the state in the notifiation.\n+ * Return the state in the notification.\n  */\n __rte_internal\n uint8_t qbman_result_SCN_state(const struct qbman_result *scn);\n@@ -825,7 +825,7 @@ uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);\n \n /* Parsing CGCU */\n /**\n- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid\n+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid\n  * @scn: the state change notification.\n  *\n  * Return the CGCU resource id.\n@@ -903,14 +903,14 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d);\n __rte_internal\n void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);\n /**\n- * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor\n+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor\n  * @d: the enqueue descriptor.\n  * @response_success: 1 = enqueue with response always; 0 = enqueue with\n  * rejections returned on a FQ.\n  * @opr_id: the order point record id.\n  * @seqnum: the order restoration sequence number.\n- * @incomplete: indiates whether this is the last fragments using the same\n- * sequeue number.\n+ * @incomplete: indicates whether this is the last fragment using the same\n+ * sequence number.\n  */\n __rte_internal\n void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,\n@@ -1052,10 +1052,10 @@ __rte_internal\n uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);\n \n /**\n- * qbman_result_eqresp_rc() - determines if enqueue command is sucessful.\n+ * qbman_result_eqresp_rc() - determines if enqueue command is successful.\n  * @eqresp: enqueue response.\n  *\n- * Return 0 when command is sucessful.\n+ * Return 0 when command is successful.\n  */\n __rte_internal\n uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);\n@@ -1250,7 +1250,7 @@ int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);\n /**\n  * These functions change the FQ flow-control stuff between XON/XOFF. (The\n  * default is XON.) This setting doesn't affect enqueues to the FQ, just\n- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when\n+ * dequeues. 
XOFF FQs will remain in the tentatively-scheduled state, even when\n  * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is\n  * changed to XOFF after it had already become truly-scheduled to a channel, and\n  * a pull dequeue of that channel occurs that selects that FQ for dequeuing,\ndiff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c\nindex 1a5e7c2d..cd0d0b16 100644\n--- a/drivers/bus/pci/linux/pci_vfio.c\n+++ b/drivers/bus/pci/linux/pci_vfio.c\n@@ -815,7 +815,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)\n \t\t\tcontinue;\n \t\t}\n \n-\t\t/* skip non-mmapable BARs */\n+\t\t/* skip non-mmappable BARs */\n \t\tif ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {\n \t\t\tfree(reg);\n \t\t\tcontinue;\ndiff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h\nindex 28567999..5af6be00 100644\n--- a/drivers/bus/vdev/rte_bus_vdev.h\n+++ b/drivers/bus/vdev/rte_bus_vdev.h\n@@ -197,7 +197,7 @@ rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);\n int rte_vdev_init(const char *name, const char *args);\n \n /**\n- * Uninitalize a driver specified by name.\n+ * Uninitialize a driver specified by name.\n  *\n  * @param name\n  *   The pointer to a driver name to be uninitialized.\ndiff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c\nindex 519ca9c6..36772736 100644\n--- a/drivers/bus/vmbus/vmbus_common.c\n+++ b/drivers/bus/vmbus/vmbus_common.c\n@@ -134,7 +134,7 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,\n \n /*\n  * If device class GUID matches, call the probe function of\n- * registere drivers for the vmbus device.\n+ * registered drivers for the vmbus device.\n  * Return -1 if initialization failed,\n  * and 1 if no driver found for this device.\n  */\ndiff --git a/drivers/common/cnxk/roc_bphy_cgx.c b/drivers/common/cnxk/roc_bphy_cgx.c\nindex 7449cbe7..c3be3c90 100644\n--- a/drivers/common/cnxk/roc_bphy_cgx.c\n+++ b/drivers/common/cnxk/roc_bphy_cgx.c\n@@ -14,7 +14,7 @@\n #define CGX_CMRX_INT_OVERFLW\t       BIT_ULL(1)\n /*\n  * CN10K stores number of lmacs in 4 bit filed\n- * in contraty to CN9K which uses only 3 bits.\n+ * contrary to CN9K which uses only 3 bits.\n  *\n  * In theory masks should differ yet on CN9K\n  * bits beyond specified range contain zeros.\ndiff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c\nindex 8f8e6d38..aac0fd6a 100644\n--- a/drivers/common/cnxk/roc_cpt.c\n+++ b/drivers/common/cnxk/roc_cpt.c\n@@ -375,7 +375,7 @@ cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)\n }\n \n int\n-cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,\n+cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmask, uint8_t blkaddr,\n \t      bool inl_dev_sso)\n {\n \tstruct cpt_lf_alloc_req_msg *req;\n@@ -390,7 +390,7 @@ cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,\n \t\treq->sso_pf_func = nix_inl_dev_pffunc_get();\n \telse\n \t\treq->sso_pf_func = idev_sso_pffunc_get();\n-\treq->eng_grpmsk = eng_grpmsk;\n+\treq->eng_grpmask = eng_grpmask;\n \treq->blkaddr = blkaddr;\n \n \treturn mbox_process(mbox);\n@@ -481,7 +481,7 @@ roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)\n \tstruct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);\n \tuint8_t blkaddr[ROC_CPT_MAX_BLKS];\n \tstruct msix_offset_rsp *rsp;\n-\tuint8_t eng_grpmsk;\n+\tuint8_t eng_grpmask;\n \tint blknum = 0;\n \tint rc, i;\n \n@@ -508,11 +508,11 @@ roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)\n \tfor (i = 0; 
i < nb_lf; i++)\n \t\tcpt->lf_blkaddr[i] = blkaddr[blknum];\n \n-\teng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |\n+\teng_grpmask = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |\n \t\t     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |\n \t\t     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);\n \n-\trc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr[blknum], false);\n+\trc = cpt_lfs_alloc(&cpt->dev, eng_grpmask, blkaddr[blknum], false);\n \tif (rc)\n \t\tgoto lfs_detach;\n \ndiff --git a/drivers/common/cnxk/roc_cpt_priv.h b/drivers/common/cnxk/roc_cpt_priv.h\nindex 61dec9a1..4bc888b2 100644\n--- a/drivers/common/cnxk/roc_cpt_priv.h\n+++ b/drivers/common/cnxk/roc_cpt_priv.h\n@@ -21,7 +21,7 @@ roc_cpt_to_cpt_priv(struct roc_cpt *roc_cpt)\n int cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify,\n \t\t   uint16_t nb_lf);\n int cpt_lfs_detach(struct dev *dev);\n-int cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blk,\n+int cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmask, uint8_t blk,\n \t\t  bool inl_dev_sso);\n int cpt_lfs_free(struct dev *dev);\n int cpt_lf_init(struct roc_cpt_lf *lf);\ndiff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h\nindex b63fe108..ae576d1b 100644\n--- a/drivers/common/cnxk/roc_mbox.h\n+++ b/drivers/common/cnxk/roc_mbox.h\n@@ -1328,7 +1328,7 @@ struct cpt_lf_alloc_req_msg {\n \tstruct mbox_msghdr hdr;\n \tuint16_t __io nix_pf_func;\n \tuint16_t __io sso_pf_func;\n-\tuint16_t __io eng_grpmsk;\n+\tuint16_t __io eng_grpmask;\n \tuint8_t __io blkaddr;\n };\n \n@@ -1739,7 +1739,7 @@ enum tim_af_status {\n \tTIM_AF_INVALID_BSIZE = -813,\n \tTIM_AF_INVALID_ENABLE_PERIODIC = -814,\n \tTIM_AF_INVALID_ENABLE_DONTFREE = -815,\n-\tTIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,\n+\tTIM_AF_ENA_DONTFREE_NSET_PERIODIC = -816,\n \tTIM_AF_RING_ALREADY_DISABLED = -817,\n };\n \ndiff --git a/drivers/common/cnxk/roc_nix_bpf.c b/drivers/common/cnxk/roc_nix_bpf.c\nindex 6996a54b..e483559e 100644\n--- a/drivers/common/cnxk/roc_nix_bpf.c\n+++ b/drivers/common/cnxk/roc_nix_bpf.c\n@@ -138,11 +138,11 @@ nix_lf_bpf_dump(__io struct nix_band_prof_s *bpf)\n {\n \tplt_dump(\"W0: cir_mantissa  \\t\\t\\t%d\\nW0: pebs_mantissa \\t\\t\\t0x%03x\",\n \t\t bpf->cir_mantissa, bpf->pebs_mantissa);\n-\tplt_dump(\"W0: peir_matissa \\t\\t\\t\\t%d\\nW0: cbs_exponent \\t\\t\\t%d\",\n-\t\t bpf->peir_mantissa, bpf->cbs_exponent);\n+\tplt_dump(\"W0: peer_mantissa \\t\\t\\t\\t%d\\nW0: cbs_exponent \\t\\t\\t%d\",\n+\t\t bpf->peer_mantissa, bpf->cbs_exponent);\n \tplt_dump(\"W0: cir_exponent \\t\\t\\t%d\\nW0: pebs_exponent \\t\\t\\t%d\",\n \t\t bpf->cir_exponent, bpf->pebs_exponent);\n-\tplt_dump(\"W0: peir_exponent \\t\\t\\t%d\\n\", bpf->peir_exponent);\n+\tplt_dump(\"W0: peer_exponent \\t\\t\\t%d\\n\", bpf->peer_exponent);\n \tplt_dump(\"W0: tnl_ena \\t\\t\\t%d\\n\", bpf->tnl_ena);\n \tplt_dump(\"W0: icolor \\t\\t\\t%d\\n\", bpf->icolor);\n \tplt_dump(\"W0: pc_mode \\t\\t\\t%d\\n\", bpf->pc_mode);\n@@ -608,8 +608,8 @@ roc_nix_bpf_config(struct roc_nix *roc_nix, uint16_t id,\n \n \t\tmeter_rate_to_nix(cfg->algo2698.pir, &exponent_p, &mantissa_p,\n \t\t\t\t  &div_exp_p, policer_timeunit);\n-\t\taq->prof.peir_mantissa = mantissa_p;\n-\t\taq->prof.peir_exponent = exponent_p;\n+\t\taq->prof.peer_mantissa = mantissa_p;\n+\t\taq->prof.peer_exponent = exponent_p;\n \n \t\tmeter_burst_to_nix(cfg->algo2698.cbs, &exponent_p, &mantissa_p);\n \t\taq->prof.cbs_mantissa = mantissa_p;\n@@ -620,11 +620,11 @@ roc_nix_bpf_config(struct roc_nix *roc_nix, uint16_t id,\n \t\taq->prof.pebs_exponent = 
exponent_p;\n \n \t\taq->prof_mask.cir_mantissa = ~(aq->prof_mask.cir_mantissa);\n-\t\taq->prof_mask.peir_mantissa = ~(aq->prof_mask.peir_mantissa);\n+\t\taq->prof_mask.peer_mantissa = ~(aq->prof_mask.peer_mantissa);\n \t\taq->prof_mask.cbs_mantissa = ~(aq->prof_mask.cbs_mantissa);\n \t\taq->prof_mask.pebs_mantissa = ~(aq->prof_mask.pebs_mantissa);\n \t\taq->prof_mask.cir_exponent = ~(aq->prof_mask.cir_exponent);\n-\t\taq->prof_mask.peir_exponent = ~(aq->prof_mask.peir_exponent);\n+\t\taq->prof_mask.peer_exponent = ~(aq->prof_mask.peer_exponent);\n \t\taq->prof_mask.cbs_exponent = ~(aq->prof_mask.cbs_exponent);\n \t\taq->prof_mask.pebs_exponent = ~(aq->prof_mask.pebs_exponent);\n \t\tbreak;\n@@ -637,8 +637,8 @@ roc_nix_bpf_config(struct roc_nix *roc_nix, uint16_t id,\n \n \t\tmeter_rate_to_nix(cfg->algo4115.eir, &exponent_p, &mantissa_p,\n \t\t\t\t  &div_exp_p, policer_timeunit);\n-\t\taq->prof.peir_mantissa = mantissa_p;\n-\t\taq->prof.peir_exponent = exponent_p;\n+\t\taq->prof.peer_mantissa = mantissa_p;\n+\t\taq->prof.peer_exponent = exponent_p;\n \n \t\tmeter_burst_to_nix(cfg->algo4115.cbs, &exponent_p, &mantissa_p);\n \t\taq->prof.cbs_mantissa = mantissa_p;\n@@ -649,12 +649,12 @@ roc_nix_bpf_config(struct roc_nix *roc_nix, uint16_t id,\n \t\taq->prof.pebs_exponent = exponent_p;\n \n \t\taq->prof_mask.cir_mantissa = ~(aq->prof_mask.cir_mantissa);\n-\t\taq->prof_mask.peir_mantissa = ~(aq->prof_mask.peir_mantissa);\n+\t\taq->prof_mask.peer_mantissa = ~(aq->prof_mask.peer_mantissa);\n \t\taq->prof_mask.cbs_mantissa = ~(aq->prof_mask.cbs_mantissa);\n \t\taq->prof_mask.pebs_mantissa = ~(aq->prof_mask.pebs_mantissa);\n \n \t\taq->prof_mask.cir_exponent = ~(aq->prof_mask.cir_exponent);\n-\t\taq->prof_mask.peir_exponent = ~(aq->prof_mask.peir_exponent);\n+\t\taq->prof_mask.peer_exponent = ~(aq->prof_mask.peer_exponent);\n \t\taq->prof_mask.cbs_exponent = ~(aq->prof_mask.cbs_exponent);\n \t\taq->prof_mask.pebs_exponent = ~(aq->prof_mask.pebs_exponent);\n \t\tbreak;\ndiff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c\nindex 3257fa67..3d81247a 100644\n--- a/drivers/common/cnxk/roc_nix_tm_ops.c\n+++ b/drivers/common/cnxk/roc_nix_tm_ops.c\n@@ -107,7 +107,7 @@ nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)\n \tif (profile->peak.rate && min_rate > profile->peak.rate)\n \t\tmin_rate = profile->peak.rate;\n \n-\t/* Each packet accomulate single count, whereas HW\n+\t/* Each packet accumulate single count, whereas HW\n \t * considers each unit as Byte, so we need convert\n \t * user pps to bps\n \t */\ndiff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c\nindex ba7f89b4..82014a2c 100644\n--- a/drivers/common/cnxk/roc_npc_mcam.c\n+++ b/drivers/common/cnxk/roc_npc_mcam.c\n@@ -234,7 +234,7 @@ npc_get_kex_capability(struct npc *npc)\n \t/* Ethtype: Offset 12B, len 2B */\n \tkex_cap.bit.ethtype_0 = npc_is_kex_enabled(\n \t\tnpc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);\n-\t/* QINQ VLAN Ethtype: ofset 8B, len 2B */\n+\t/* QINQ VLAN Ethtype: offset 8B, len 2B */\n \tkex_cap.bit.ethtype_x = npc_is_kex_enabled(\n \t\tnpc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);\n \t/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */\ndiff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h\nindex 712302bc..74e0fb2e 100644\n--- a/drivers/common/cnxk/roc_npc_priv.h\n+++ b/drivers/common/cnxk/roc_npc_priv.h\n@@ -363,7 +363,7 @@ struct npc {\n \tuint32_t rss_grps;\t\t\t/* rss groups supported */\n \tuint16_t 
flow_prealloc_size;\t\t/* Pre allocated mcam size */\n \tuint16_t flow_max_priority;\t\t/* Max priority for flow */\n-\tuint16_t switch_header_type; /* Suppprted switch header type */\n+\tuint16_t switch_header_type; /* Supported switch header type */\n \tuint32_t mark_actions;\t     /* Number of mark actions */\n \tuint32_t vtag_strip_actions; /* vtag insert/strip actions */\n \tuint16_t pf_func;\t     /* pf_func of device */\ndiff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c\nindex 534b697b..ca58e19a 100644\n--- a/drivers/common/cnxk/roc_tim.c\n+++ b/drivers/common/cnxk/roc_tim.c\n@@ -73,7 +73,7 @@ tim_err_desc(int rc)\n \tcase TIM_AF_INVALID_ENABLE_DONTFREE:\n \t\tplt_err(\"Invalid Don't free value.\");\n \t\tbreak;\n-\tcase TIM_AF_ENA_DONTFRE_NSET_PERIODIC:\n+\tcase TIM_AF_ENA_DONTFREE_NSET_PERIODIC:\n \t\tplt_err(\"Don't free bit not set when periodic is enabled.\");\n \t\tbreak;\n \tcase TIM_AF_RING_ALREADY_DISABLED:\ndiff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h\nindex e015cf66..e1f2f600 100644\n--- a/drivers/common/cpt/cpt_ucode.h\n+++ b/drivers/common/cpt/cpt_ucode.h\n@@ -246,7 +246,7 @@ cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,\n \tif (cpt_ctx->fc_type == FC_GEN) {\n \t\t/*\n \t\t * We need to always say IV is from DPTR as user can\n-\t\t * sometimes iverride IV per operation.\n+\t\t * sometimes override IV per operation.\n \t\t */\n \t\tfctx->enc.iv_source = CPT_FROM_DPTR;\n \n@@ -3035,7 +3035,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,\n \t\ttailroom = rte_pktmbuf_tailroom(pkt);\n \t\tif (likely((headroom >= 24) &&\n \t\t    (tailroom >= 8))) {\n-\t\t\t/* In 83XX this is prerequivisit for Direct mode */\n+\t\t\t/* In 83XX this is prerequisite for Direct mode */\n \t\t\t*flags |= SINGLE_BUF_HEADTAILROOM;\n \t\t}\n \t\tparam->bufs[0].vaddr = seg_data;\ndiff --git a/drivers/common/cpt/cpt_ucode_asym.h b/drivers/common/cpt/cpt_ucode_asym.h\nindex a67ded64..30dd5399 100644\n--- a/drivers/common/cpt/cpt_ucode_asym.h\n+++ b/drivers/common/cpt/cpt_ucode_asym.h\n@@ -779,7 +779,7 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,\n \t * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),\n \t * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),\n \t * prime len, order len)).\n-\t * Please note sign, public key and order can not excede prime length\n+\t * Please note sign, public key and order can not exceed prime length\n \t * i.e. 6 * p_align\n \t */\n \tdlen = sizeof(fpm_table_iova) + m_align + (8 * p_align);\ndiff --git a/drivers/common/dpaax/caamflib/desc/algo.h b/drivers/common/dpaax/caamflib/desc/algo.h\nindex 6bb91505..e0848f09 100644\n--- a/drivers/common/dpaax/caamflib/desc/algo.h\n+++ b/drivers/common/dpaax/caamflib/desc/algo.h\n@@ -67,7 +67,7 @@ cnstr_shdsc_zuce(uint32_t *descbuf, bool ps, bool swap,\n  * @authlen: size of digest\n  *\n  * The IV prepended before hmac payload must be 8 bytes consisting\n- * of COUNT||BEAERER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and\n+ * of COUNT||BEARER||DIR. 
The COUNT is of 32-bits, bearer is of 5 bits and\n  * direction is of 1 bit - totalling to 38 bits.\n  *\n  * Return: size of descriptor written in words or negative number on error\ndiff --git a/drivers/common/dpaax/caamflib/desc/ipsec.h b/drivers/common/dpaax/caamflib/desc/ipsec.h\nindex 668d2164..499f4f93 100644\n--- a/drivers/common/dpaax/caamflib/desc/ipsec.h\n+++ b/drivers/common/dpaax/caamflib/desc/ipsec.h\n@@ -1437,7 +1437,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,\n \t\t\t\t\t\tCAAM_CMD_SZ)\n \n /**\n- * cnstr_shdsc_authenc - authenc-like descriptor\n+ * cnstr_shdsc_authentic - authentic-like descriptor\n  * @descbuf: pointer to buffer used for descriptor construction\n  * @ps: if 36/40bit addressing is desired, this parameter must be true\n  * @swap: if true, perform descriptor byte swapping on a 4-byte boundary\n@@ -1502,7 +1502,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,\n  * Return: size of descriptor written in words or negative number on error\n  */\n static inline int\n-cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,\n+cnstr_shdsc_authentic(uint32_t *descbuf, bool ps, bool swap,\n \t\t    enum rta_share_type share,\n \t\t    struct alginfo *cipherdata,\n \t\t    struct alginfo *authdata,\ndiff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h\nindex b2497a54..07f55b5b 100644\n--- a/drivers/common/dpaax/caamflib/desc/sdap.h\n+++ b/drivers/common/dpaax/caamflib/desc/sdap.h\n@@ -492,10 +492,10 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,\n \n \t/* Set the variable size of data the register will write */\n \tif (dir == OP_TYPE_ENCAP_PROTOCOL) {\n-\t\t/* We will add the interity data so add its length */\n+\t\t/* We will add the integrity data so add its length */\n \t\tMATHI(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);\n \t} else {\n-\t\t/* We will check the interity data so remove its length */\n+\t\t/* We will check the integrity data so remove its length */\n \t\tMATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);\n \t\t/* Do not take the ICV in the out-snooping configuration */\n \t\tMATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);\n@@ -803,7 +803,7 @@ static inline int pdcp_sdap_insert_no_snoop_op(\n \t\t     CLRW_CLR_C1MODE,\n \t\t     CLRW, 0, 4, IMMED);\n \n-\t\t/* Load the key for authentcation */\n+\t\t/* Load the key for authentication */\n \t\tKEY(p, KEY1, authdata->key_enc_flags, authdata->key,\n \t\t    authdata->keylen, INLINE_KEY(authdata));\n \ndiff --git a/drivers/common/dpaax/caamflib/rta/operation_cmd.h b/drivers/common/dpaax/caamflib/rta/operation_cmd.h\nindex 3d339cb0..e456ad3c 100644\n--- a/drivers/common/dpaax/caamflib/rta/operation_cmd.h\n+++ b/drivers/common/dpaax/caamflib/rta/operation_cmd.h\n@@ -199,7 +199,7 @@ __rta_alg_aai_zuca(uint16_t aai)\n }\n \n struct alg_aai_map {\n-\tuint32_t chipher_algo;\n+\tuint32_t cipher_algo;\n \tint (*aai_func)(uint16_t);\n \tuint32_t class;\n };\n@@ -242,7 +242,7 @@ rta_operation(struct program *program, uint32_t cipher_algo,\n \tint ret;\n \n \tfor (i = 0; i < alg_table_sz[rta_sec_era]; i++) {\n-\t\tif (alg_table[i].chipher_algo == cipher_algo) {\n+\t\tif (alg_table[i].cipher_algo == cipher_algo) {\n \t\t\tif ((aai ==  OP_ALG_AAI_XCBC_MAC) ||\n \t\t\t\t\t(aai == OP_ALG_AAI_CBC_XCBCMAC))\n \t\t\t\topcode |= cipher_algo | OP_TYPE_CLASS2_ALG;\n@@ -340,7 +340,7 @@ rta_operation2(struct program *program, uint32_t cipher_algo,\n \tint ret;\n \n \tfor (i = 0; i < 
alg_table_sz[rta_sec_era]; i++) {\n-\t\tif (alg_table[i].chipher_algo == cipher_algo) {\n+\t\tif (alg_table[i].cipher_algo == cipher_algo) {\n \t\t\tif ((aai ==  OP_ALG_AAI_XCBC_MAC) ||\n \t\t\t\t\t(aai == OP_ALG_AAI_CBC_XCBCMAC) ||\n \t\t\t\t\t(aai == OP_ALG_AAI_CMAC))\ndiff --git a/drivers/common/dpaax/dpaax_iova_table.c b/drivers/common/dpaax/dpaax_iova_table.c\nindex 3d661102..9daac4bc 100644\n--- a/drivers/common/dpaax/dpaax_iova_table.c\n+++ b/drivers/common/dpaax/dpaax_iova_table.c\n@@ -261,7 +261,7 @@ dpaax_iova_table_depopulate(void)\n \trte_free(dpaax_iova_table_p->entries);\n \tdpaax_iova_table_p = NULL;\n \n-\tDPAAX_DEBUG(\"IOVA Table cleanedup\");\n+\tDPAAX_DEBUG(\"IOVA Table cleaned\");\n }\n \n int\ndiff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h\nindex 51267ca3..1cd87587 100644\n--- a/drivers/common/iavf/iavf_type.h\n+++ b/drivers/common/iavf/iavf_type.h\n@@ -1006,7 +1006,7 @@ struct iavf_profile_tlv_section_record {\n \tu8 data[12];\n };\n \n-/* Generic AQ section in proflie */\n+/* Generic AQ section in profile */\n struct iavf_profile_aq_section {\n \tu16 opcode;\n \tu16 flags;\ndiff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h\nindex 269578f7..80e754a1 100644\n--- a/drivers/common/iavf/virtchnl.h\n+++ b/drivers/common/iavf/virtchnl.h\n@@ -233,7 +233,7 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)\n \tcase VIRTCHNL_OP_DCF_CMD_DESC:\n \t\treturn \"VIRTCHNL_OP_DCF_CMD_DESC\";\n \tcase VIRTCHNL_OP_DCF_CMD_BUFF:\n-\t\treturn \"VIRTCHHNL_OP_DCF_CMD_BUFF\";\n+\t\treturn \"VIRTCHNL_OP_DCF_CMD_BUFF\";\n \tcase VIRTCHNL_OP_DCF_DISABLE:\n \t\treturn \"VIRTCHNL_OP_DCF_DISABLE\";\n \tcase VIRTCHNL_OP_DCF_GET_VSI_MAP:\ndiff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c\nindex f1650f94..cc130221 100644\n--- a/drivers/common/mlx5/mlx5_common.c\n+++ b/drivers/common/mlx5/mlx5_common.c\n@@ -854,7 +854,7 @@ static void mlx5_common_driver_init(void)\n static bool mlx5_common_initialized;\n \n /**\n- * One time innitialization routine for run-time dependency on glue library\n+ * One time initialization routine for run-time dependency on glue library\n  * for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,\n  * must invoke in its constructor.\n  */\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex c694aaf2..1537b5d4 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -1541,7 +1541,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,\n  * Destroy a mempool registration object.\n  *\n  * @param standalone\n- *   Whether @p mpr owns its MRs excludively, i.e. they are not shared.\n+ *   Whether @p mpr owns its MRs exclusively, i.e. 
they are not shared.\n */\n static void\n mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,\ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c\nindex e52b995e..7cd3d4fa 100644\n--- a/drivers/common/mlx5/mlx5_devx_cmds.c\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.c\n@@ -1822,7 +1822,7 @@ mlx5_devx_cmd_create_td(void *ctx)\n  *   Pointer to file stream.\n  *\n  * @return\n- *   0 on success, a nagative value otherwise.\n+ *   0 on success, a negative value otherwise.\n  */\n int\n mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,\ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h\nindex d7f71646..2dbbed24 100644\n--- a/drivers/common/mlx5/mlx5_devx_cmds.h\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.h\n@@ -128,7 +128,7 @@ enum {\n \n enum {\n \tPARSE_GRAPH_NODE_CAP_LENGTH_MODE_FIXED          = RTE_BIT32(0),\n-\tPARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLISIT_FIELD = RTE_BIT32(1),\n+\tPARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLICIT_FIELD = RTE_BIT32(1),\n \tPARSE_GRAPH_NODE_CAP_LENGTH_MODE_BITMASK_FIELD  = RTE_BIT32(2)\n };\n \ndiff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c\nindex b19501e1..cef3b88e 100644\n--- a/drivers/common/mlx5/mlx5_malloc.c\n+++ b/drivers/common/mlx5/mlx5_malloc.c\n@@ -58,7 +58,7 @@ static struct mlx5_sys_mem mlx5_sys_mem = {\n  * Check if the address belongs to memory seg list.\n  *\n  * @param addr\n- *   Memory address to be ckeced.\n+ *   Memory address to be checked.\n  * @param msl\n  *   Memory seg list.\n  *\n@@ -109,7 +109,7 @@ mlx5_mem_update_msl(void *addr)\n  * Check if the address belongs to rte memory.\n  *\n  * @param addr\n- *   Memory address to be ckeced.\n+ *   Memory address to be checked.\n  *\n  * @return\n  *   True if it belongs, false otherwise.\ndiff --git a/drivers/common/mlx5/mlx5_malloc.h b/drivers/common/mlx5/mlx5_malloc.h\nindex 74b7eeb2..92149f7b 100644\n--- a/drivers/common/mlx5/mlx5_malloc.h\n+++ b/drivers/common/mlx5/mlx5_malloc.h\n@@ -19,7 +19,7 @@ extern \"C\" {\n \n enum mlx5_mem_flags {\n \tMLX5_MEM_ANY = 0,\n-\t/* Memory will be allocated dpends on sys_mem_en. */\n+\t/* Memory will be allocated depending on sys_mem_en. */\n \tMLX5_MEM_SYS = 1 << 0,\n \t/* Memory should be allocated from system. */\n \tMLX5_MEM_RTE = 1 << 1,\ndiff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h\nindex 2ded67e8..d921d525 100644\n--- a/drivers/common/mlx5/mlx5_prm.h\n+++ b/drivers/common/mlx5/mlx5_prm.h\n@@ -3112,8 +3112,8 @@ struct mlx5_ifc_conn_track_aso_bits {\n \tu8 max_ack_window[0x3];\n \tu8 reserved_at_1f8[0x1];\n \tu8 retransmission_counter[0x3];\n-\tu8 retranmission_limit_exceeded[0x1];\n-\tu8 retranmission_limit[0x3]; /* End of DW15. */\n+\tu8 retransmission_limit_exceeded[0x1];\n+\tu8 retransmission_limit[0x3]; /* End of DW15. */\n };\n \n struct mlx5_ifc_conn_track_offload_bits {\n@@ -4172,7 +4172,7 @@ mlx5_flow_mark_get(uint32_t val)\n  *   timestamp format supported by the queue.\n  *\n  * @return\n- *   Converted timstamp format settings.\n+ *   Converted timestamp format settings.\n  */\n static inline uint32_t\n mlx5_ts_format_conv(uint32_t ts_format)\ndiff --git a/drivers/common/mlx5/windows/mlx5_common_os.c b/drivers/common/mlx5/windows/mlx5_common_os.c\nindex 162c7476..c3cfc315 100644\n--- a/drivers/common/mlx5/windows/mlx5_common_os.c\n+++ b/drivers/common/mlx5/windows/mlx5_common_os.c\n@@ -302,7 +302,7 @@ mlx5_os_umem_dereg(void *pumem)\n }\n \n /**\n- * Register mr. 
Given protection doamin pointer, pointer to addr and length\n+ * Register mr. Given protection domain pointer, pointer to addr and length\n  * register the memory region.\n  *\n  * @param[in] pd\n@@ -310,7 +310,7 @@ mlx5_os_umem_dereg(void *pumem)\n  * @param[in] addr\n  *   Pointer to memory start address (type devx_device_ctx).\n  * @param[in] length\n- *   Lengtoh of the memory to register.\n+ *   Length of the memory to register.\n  * @param[out] pmd_mr\n  *   pmd_mr struct set with lkey, address, length, pointer to mr object, mkey\n  *\ndiff --git a/drivers/common/mlx5/windows/mlx5_common_os.h b/drivers/common/mlx5/windows/mlx5_common_os.h\nindex 3afce56c..61fc8dd7 100644\n--- a/drivers/common/mlx5/windows/mlx5_common_os.h\n+++ b/drivers/common/mlx5/windows/mlx5_common_os.h\n@@ -21,7 +21,7 @@\n /**\n  * This API allocates aligned or non-aligned memory.  The free can be on either\n  * aligned or nonaligned memory.  To be protected - even though there may be no\n- * alignment - in Windows this API will unconditioanlly call _aligned_malloc()\n+ * alignment - in Windows this API will unconditionally call _aligned_malloc()\n  * with at least a minimal alignment size.\n  *\n  * @param[in] align\ndiff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h\nindex 25b521a7..8d8fe58d 100644\n--- a/drivers/common/octeontx2/otx2_mbox.h\n+++ b/drivers/common/octeontx2/otx2_mbox.h\n@@ -1296,7 +1296,7 @@ struct cpt_lf_alloc_req_msg {\n \n struct cpt_lf_alloc_rsp_msg {\n \tstruct mbox_msghdr hdr;\n-\tuint16_t __otx2_io eng_grpmsk;\n+\tuint16_t __otx2_io eng_grpmask;\n };\n \n #define CPT_INLINE_INBOUND\t0\n@@ -1625,7 +1625,7 @@ enum tim_af_status {\n \tTIM_AF_INVALID_BSIZE\t\t\t= -813,\n \tTIM_AF_INVALID_ENABLE_PERIODIC\t\t= -814,\n \tTIM_AF_INVALID_ENABLE_DONTFREE\t\t= -815,\n-\tTIM_AF_ENA_DONTFRE_NSET_PERIODIC\t= -816,\n+\tTIM_AF_ENA_DONTFREE_NSET_PERIODIC\t= -816,\n \tTIM_AF_RING_ALREADY_DISABLED\t\t= -817,\n };\n \ndiff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h\nindex a6d403fa..12a7258c 100644\n--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h\n+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h\n@@ -72,7 +72,7 @@\n #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)\n #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)\n \n-/* Minimum ring bufer size for memory allocation */\n+/* Minimum ring buffer size for memory allocation */\n #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? 
\\\n \t\t\t\tADF_RING_SIZE_4K : SIZE)\n #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)\ndiff --git a/drivers/common/sfc_efx/efsys.h b/drivers/common/sfc_efx/efsys.h\nindex 3860c283..224254be 100644\n--- a/drivers/common/sfc_efx/efsys.h\n+++ b/drivers/common/sfc_efx/efsys.h\n@@ -616,7 +616,7 @@ typedef struct efsys_bar_s {\n \n #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)\t((void)0)\n \n-/* Just avoid store and compiler (impliciltly) reordering */\n+/* Just avoid store and compiler (implicitly) reordering */\n #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)\trte_wmb()\n \n /* TIMESTAMP */\ndiff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h\nindex 96e538bb..94a48cde 100644\n--- a/drivers/compress/octeontx/include/zip_regs.h\n+++ b/drivers/compress/octeontx/include/zip_regs.h\n@@ -195,7 +195,7 @@ union zip_inst_s {\n \t\tuint64_t bf                    : 1;\n \t\t/** Comp/decomp operation */\n \t\tuint64_t op                    : 2;\n-\t\t/** Data sactter */\n+\t\t/** Data scatter */\n \t\tuint64_t ds                    : 1;\n \t\t/** Data gather */\n \t\tuint64_t dg                    : 1;\n@@ -376,7 +376,7 @@ union zip_inst_s {\n \t\tuint64_t bf                    : 1;\n \t\t/** Comp/decomp operation */\n \t\tuint64_t op                    : 2;\n-\t\t/** Data sactter */\n+\t\t/** Data scatter */\n \t\tuint64_t ds                    : 1;\n \t\t/** Data gather */\n \t\tuint64_t dg                    : 1;\ndiff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h\nindex e43f7f5c..e29b8b87 100644\n--- a/drivers/compress/octeontx/otx_zip.h\n+++ b/drivers/compress/octeontx/otx_zip.h\n@@ -31,7 +31,7 @@ extern int octtx_zip_logtype_driver;\n /**< PCI device id of ZIP VF */\n #define PCI_DEVICE_ID_OCTEONTX_ZIPVF\t0xA037\n \n-/* maxmum number of zip vf devices */\n+/* maximum number of zip vf devices */\n #define ZIP_MAX_VFS 8\n \n /* max size of one chunk */\n@@ -66,7 +66,7 @@ extern int octtx_zip_logtype_driver;\n \t((_align) * (((x) + (_align) - 1) / (_align)))\n \n /**< ZIP PMD device name */\n-#define COMPRESSDEV_NAME_ZIP_PMD\tcompress_octeonx\n+#define COMPRESSDEV_NAME_ZIP_PMD\tcompress_octeontx\n \n #define ZIP_PMD_LOG(level, fmt, args...) 
\\\n \trte_log(RTE_LOG_ ## level, \\\ndiff --git a/drivers/compress/qat/dev/qat_comp_pmd_gen1.c b/drivers/compress/qat/dev/qat_comp_pmd_gen1.c\nindex 12d9d890..f92250d3 100644\n--- a/drivers/compress/qat/dev/qat_comp_pmd_gen1.c\n+++ b/drivers/compress/qat/dev/qat_comp_pmd_gen1.c\n@@ -39,10 +39,10 @@ qat_comp_dev_config_gen1(struct rte_compressdev *dev,\n \t\t\t\"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so\"\n \t\t\t\" QAT device can't be used for Dynamic Deflate.\");\n \t} else {\n-\t\tcomp_dev->interm_buff_mz =\n+\t\tcomp_dev->interim_buff_mz =\n \t\t\t\tqat_comp_setup_inter_buffers(comp_dev,\n \t\t\t\t\tRTE_PMD_QAT_COMP_IM_BUFFER_SIZE);\n-\t\tif (comp_dev->interm_buff_mz == NULL)\n+\t\tif (comp_dev->interim_buff_mz == NULL)\n \t\t\treturn -ENOMEM;\n \t}\n \ndiff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c\nindex e8f57c3c..2a3ce2ad 100644\n--- a/drivers/compress/qat/qat_comp.c\n+++ b/drivers/compress/qat/qat_comp.c\n@@ -815,7 +815,7 @@ qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,\n \n static int\n qat_comp_create_templates(struct qat_comp_xform *qat_xform,\n-\t\t\t  const struct rte_memzone *interm_buff_mz,\n+\t\t\t  const struct rte_memzone *interim_buff_mz,\n \t\t\t  const struct rte_comp_xform *xform,\n \t\t\t  const struct qat_comp_stream *stream,\n \t\t\t  enum rte_comp_op_type op_type,\n@@ -923,7 +923,7 @@ qat_comp_create_templates(struct qat_comp_xform *qat_xform,\n \n \t\tcomp_req->u1.xlt_pars.inter_buff_ptr =\n \t\t\t\t(qat_comp_get_num_im_bufs_required(qat_dev_gen)\n-\t\t\t\t\t== 0) ? 0 : interm_buff_mz->iova;\n+\t\t\t\t\t== 0) ? 0 : interim_buff_mz->iova;\n \t}\n \n #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n@@ -979,7 +979,7 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,\n \n \t\tif (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||\n \t\t  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)\n-\t\t\t\t   && qat->interm_buff_mz == NULL\n+\t\t\t\t   && qat->interim_buff_mz == NULL\n \t\t\t\t   && im_bufs > 0))\n \t\t\tqat_xform->qat_comp_request_type =\n \t\t\t\t\tQAT_COMP_REQUEST_FIXED_COMP_STATELESS;\n@@ -988,7 +988,7 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,\n \t\t\t\tRTE_COMP_HUFFMAN_DYNAMIC ||\n \t\t\t\txform->compress.deflate.huffman ==\n \t\t\t\t\t\tRTE_COMP_HUFFMAN_DEFAULT) &&\n-\t\t\t\t(qat->interm_buff_mz != NULL ||\n+\t\t\t\t(qat->interim_buff_mz != NULL ||\n \t\t\t\t\t\tim_bufs == 0))\n \n \t\t\tqat_xform->qat_comp_request_type =\n@@ -1007,7 +1007,7 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,\n \t\tqat_xform->checksum_type = xform->decompress.chksum;\n \t}\n \n-\tif (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,\n+\tif (qat_comp_create_templates(qat_xform, qat->interim_buff_mz, xform,\n \t\t\t\t      NULL, RTE_COMP_OP_STATELESS,\n \t\t\t\t      qat_dev_gen)) {\n \t\tQAT_LOG(ERR, \"QAT: Problem with setting compression\");\n@@ -1107,7 +1107,7 @@ qat_comp_stream_create(struct rte_compressdev *dev,\n \tptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;\n \tptr->qat_xform.checksum_type = xform->decompress.chksum;\n \n-\tif (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,\n+\tif (qat_comp_create_templates(&ptr->qat_xform, qat->interim_buff_mz,\n \t\t\t\t      xform, ptr, RTE_COMP_OP_STATEFUL,\n \t\t\t\t      qat->qat_dev->qat_dev_gen)) {\n \t\tQAT_LOG(ERR, \"QAT: problem with creating descriptor template for stream\");\ndiff --git a/drivers/compress/qat/qat_comp_pmd.c 
b/drivers/compress/qat/qat_comp_pmd.c\nindex 9b24d46e..ebb93acc 100644\n--- a/drivers/compress/qat/qat_comp_pmd.c\n+++ b/drivers/compress/qat/qat_comp_pmd.c\n@@ -463,7 +463,7 @@ qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,\n \t\t} else if (info.error) {\n \t\t\trte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);\n \t\t\tQAT_LOG(ERR,\n-\t\t\t     \"Destoying mempool %s as at least one element failed initialisation\",\n+\t\t\t     \"Destroying mempool %s as at least one element failed initialisation\",\n \t\t\t     stream_pool_name);\n \t\t\trte_mempool_free(mp);\n \t\t\tmp = NULL;\n@@ -477,7 +477,7 @@ static void\n _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)\n {\n \t/* Free intermediate buffers */\n-\tif (comp_dev->interm_buff_mz) {\n+\tif (comp_dev->interim_buff_mz) {\n \t\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \t\tint i = qat_comp_get_num_im_bufs_required(\n \t\t\t\tcomp_dev->qat_dev->qat_dev_gen);\n@@ -488,8 +488,8 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)\n \t\t\t\t\tcomp_dev->qat_dev->name, i);\n \t\t\trte_memzone_free(rte_memzone_lookup(mz_name));\n \t\t}\n-\t\trte_memzone_free(comp_dev->interm_buff_mz);\n-\t\tcomp_dev->interm_buff_mz = NULL;\n+\t\trte_memzone_free(comp_dev->interim_buff_mz);\n+\t\tcomp_dev->interim_buff_mz = NULL;\n \t}\n \n \t/* Free private_xform pool */\ndiff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h\nindex 3c8682a7..8331b54d 100644\n--- a/drivers/compress/qat/qat_comp_pmd.h\n+++ b/drivers/compress/qat/qat_comp_pmd.h\n@@ -60,7 +60,7 @@ struct qat_comp_dev_private {\n \t/**< The pointer to this compression device structure */\n \tconst struct rte_compressdev_capabilities *qat_dev_capabilities;\n \t/* QAT device compression capabilities */\n-\tconst struct rte_memzone *interm_buff_mz;\n+\tconst struct rte_memzone *interim_buff_mz;\n \t/**< The device's memory for intermediate buffers */\n \tstruct rte_mempool *xformpool;\n \t/**< The device's pool for qat_comp_xforms */\ndiff --git a/drivers/crypto/bcmfs/bcmfs_device.h b/drivers/crypto/bcmfs/bcmfs_device.h\nindex e5ca8669..4901a6cf 100644\n--- a/drivers/crypto/bcmfs/bcmfs_device.h\n+++ b/drivers/crypto/bcmfs/bcmfs_device.h\n@@ -32,7 +32,7 @@ enum bcmfs_device_type {\n \tBCMFS_UNKNOWN\n };\n \n-/* A table to store registered queue pair opertations */\n+/* A table to store registered queue pair operations */\n struct bcmfs_hw_queue_pair_ops_table {\n \trte_spinlock_t tl;\n \t/* Number of used ops structs in the table. */\ndiff --git a/drivers/crypto/bcmfs/bcmfs_qp.c b/drivers/crypto/bcmfs/bcmfs_qp.c\nindex cb5ff6c6..61d457f4 100644\n--- a/drivers/crypto/bcmfs/bcmfs_qp.c\n+++ b/drivers/crypto/bcmfs/bcmfs_qp.c\n@@ -212,7 +212,7 @@ bcmfs_qp_setup(struct bcmfs_qp **qp_addr,\n \t\tnb_descriptors = FS_RM_MAX_REQS;\n \n \tif (qp_conf->iobase == NULL) {\n-\t\tBCMFS_LOG(ERR, \"IO onfig space null\");\n+\t\tBCMFS_LOG(ERR, \"IO config space null\");\n \t\treturn -EINVAL;\n \t}\n \ndiff --git a/drivers/crypto/bcmfs/bcmfs_sym_defs.h b/drivers/crypto/bcmfs/bcmfs_sym_defs.h\nindex eaefe97e..9bb8a695 100644\n--- a/drivers/crypto/bcmfs/bcmfs_sym_defs.h\n+++ b/drivers/crypto/bcmfs/bcmfs_sym_defs.h\n@@ -20,11 +20,11 @@ struct bcmfs_sym_request;\n \n /** Crypto Request processing successful. */\n #define BCMFS_SYM_RESPONSE_SUCCESS               (0)\n-/** Crypot Request processing protocol failure. */\n+/** Crypto Request processing protocol failure. 
*/\n #define BCMFS_SYM_RESPONSE_PROTO_FAILURE         (1)\n-/** Crypot Request processing completion failure. */\n+/** Crypto Request processing completion failure. */\n #define BCMFS_SYM_RESPONSE_COMPL_ERROR           (2)\n-/** Crypot Request processing hash tag check error. */\n+/** Crypto Request processing hash tag check error. */\n #define BCMFS_SYM_RESPONSE_HASH_TAG_ERROR        (3)\n \n /** Maximum threshold length to adjust AAD in continuation\ndiff --git a/drivers/crypto/bcmfs/bcmfs_sym_engine.h b/drivers/crypto/bcmfs/bcmfs_sym_engine.h\nindex d9594246..51ff9f75 100644\n--- a/drivers/crypto/bcmfs/bcmfs_sym_engine.h\n+++ b/drivers/crypto/bcmfs/bcmfs_sym_engine.h\n@@ -12,7 +12,7 @@\n #include \"bcmfs_sym_defs.h\"\n #include \"bcmfs_sym_req.h\"\n \n-/* structure to hold element's arrtibutes */\n+/* structure to hold element's attributes */\n struct fsattr {\n \tvoid *va;\n \tuint64_t pa;\ndiff --git a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c\nindex 86e53051..c677c0cd 100644\n--- a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c\n+++ b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c\n@@ -441,7 +441,7 @@ static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)\n {\n \tstruct bcmfs_queue *txq = &qp->tx_q;\n \n-\t/* sync in bfeore ringing the door-bell */\n+\t/* sync in before ringing the door-bell */\n \trte_wmb();\n \n \tFS_MMIO_WRITE32(txq->descs_inflight,\ndiff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c\nindex 8e9cfe73..7b2c7d04 100644\n--- a/drivers/crypto/caam_jr/caam_jr.c\n+++ b/drivers/crypto/caam_jr/caam_jr.c\n@@ -58,7 +58,7 @@ struct sec_outring_entry {\n \tuint32_t status;\t/* Status for completed descriptor */\n } __rte_packed;\n \n-/* virtual address conversin when mempool support is available for ctx */\n+/* virtual address conversion when mempool support is available for ctx */\n static inline phys_addr_t\n caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)\n {\n@@ -447,7 +447,7 @@ caam_jr_prep_cdb(struct caam_jr_session *ses)\n \t\t\t}\n \t\t} else {\n \t\t\t/* Auth_only_len is overwritten in fd for each job */\n-\t\t\tshared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,\n+\t\t\tshared_desc_len = cnstr_shdsc_authentic(cdb->sh_desc,\n \t\t\t\t\ttrue, swap, SHR_SERIAL,\n \t\t\t\t\t&alginfo_c, &alginfo_a,\n \t\t\t\t\tses->iv.length,\ndiff --git a/drivers/crypto/caam_jr/caam_jr_hw_specific.h b/drivers/crypto/caam_jr/caam_jr_hw_specific.h\nindex bbe8bc3f..3c5778c9 100644\n--- a/drivers/crypto/caam_jr/caam_jr_hw_specific.h\n+++ b/drivers/crypto/caam_jr/caam_jr_hw_specific.h\n@@ -376,7 +376,7 @@ struct sec_job_ring_t {\n \tvoid *register_base_addr;\t/* Base address for SEC's\n \t\t\t\t\t * register memory for this job ring.\n \t\t\t\t\t */\n-\tuint8_t coalescing_en;\t\t/* notifies if coelescing is\n+\tuint8_t coalescing_en;\t\t/* notifies if coalescing is\n \t\t\t\t\t * enabled for the job ring\n \t\t\t\t\t */\n \tsec_job_ring_state_t jr_state;\t/* The state of this job ring */\n@@ -479,7 +479,7 @@ void hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code);\n \n /* @brief Set interrupt coalescing parameters on the Job Ring.\n  * @param [in]  job_ring\t\tThe job ring\n- * @param [in]  irq_coalesing_timer     Interrupt coalescing timer threshold.\n+ * @param [in]  irq_coalescing_timer     Interrupt coalescing timer threshold.\n  *\t\t\t\t\tThis value determines the maximum\n  *\t\t\t\t\tamount of time after processing a\n  *\t\t\t\t\tdescriptor before raising an interrupt.\ndiff --git a/drivers/crypto/caam_jr/caam_jr_pvt.h 
b/drivers/crypto/caam_jr/caam_jr_pvt.h\nindex 552d6b9b..52f872bc 100644\n--- a/drivers/crypto/caam_jr/caam_jr_pvt.h\n+++ b/drivers/crypto/caam_jr/caam_jr_pvt.h\n@@ -169,7 +169,7 @@ struct sec4_sg_entry {\n \n /* Structure encompassing a job descriptor which is to be processed\n  * by SEC. User should also initialise this structure with the callback\n- * function pointer which will be called by driver after recieving proccessed\n+ * function pointer which will be called by driver after receiving processed\n  * descriptor from SEC. User data is also passed in this data structure which\n  * will be sent as an argument to the user callback function.\n  */\n@@ -288,7 +288,7 @@ int caam_jr_enable_irqs(int uio_fd);\n  *  value that indicates an IRQ disable action into UIO file descriptor\n  *  of this job ring.\n  *\n- * @param [in]  uio_fd    UIO File descripto\n+ * @param [in]  uio_fd    UIO File descriptor\n  * @retval 0 for success\n  * @retval -1 value for error\n  *\ndiff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c\nindex e4ee1023..583ba3b5 100644\n--- a/drivers/crypto/caam_jr/caam_jr_uio.c\n+++ b/drivers/crypto/caam_jr/caam_jr_uio.c\n@@ -227,7 +227,7 @@ caam_jr_enable_irqs(int uio_fd)\n  *  value that indicates an IRQ disable action into UIO file descriptor\n  *  of this job ring.\n  *\n- * @param [in]  uio_fd    UIO File descripto\n+ * @param [in]  uio_fd    UIO File descriptor\n  * @retval 0 for success\n  * @retval -1 value for error\n  *\ndiff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c\nindex 70daed79..4ed91a74 100644\n--- a/drivers/crypto/ccp/ccp_crypto.c\n+++ b/drivers/crypto/ccp/ccp_crypto.c\n@@ -1299,7 +1299,7 @@ ccp_auth_slot(struct ccp_session *session)\n \tcase CCP_AUTH_ALGO_SHA512_HMAC:\n \t\t/**\n \t\t * 1. Load PHash1 = H(k ^ ipad); to LSB\n-\t\t * 2. generate IHash = H(hash on meassage with PHash1\n+\t\t * 2. generate IHash = H(hash on message with PHash1\n \t\t * as init values);\n \t\t * 3. Retrieve IHash 2 slots for 384/512\n \t\t * 4. 
Load Phash2 = H(k ^ opad); to LSB\ndiff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h\nindex 8e6d03ef..d307f73e 100644\n--- a/drivers/crypto/ccp/ccp_crypto.h\n+++ b/drivers/crypto/ccp/ccp_crypto.h\n@@ -70,7 +70,7 @@\n /* Maximum length for digest */\n #define DIGEST_LENGTH_MAX\t64\n \n-/* SHA LSB intialiazation values */\n+/* SHA LSB initialization values */\n \n #define SHA1_H0\t\t0x67452301UL\n #define SHA1_H1\t\t0xefcdab89UL\ndiff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h\nindex 85c8fc47..2a205cd4 100644\n--- a/drivers/crypto/ccp/ccp_dev.h\n+++ b/drivers/crypto/ccp/ccp_dev.h\n@@ -19,7 +19,7 @@\n #include <rte_crypto_sym.h>\n #include <cryptodev_pmd.h>\n \n-/**< CCP sspecific */\n+/**< CCP specific */\n #define MAX_HW_QUEUES                   5\n #define CCP_MAX_TRNG_RETRIES\t\t10\n #define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)\ndiff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\nindex 0d363651..a510271a 100644\n--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n@@ -65,7 +65,7 @@ struct pending_queue {\n \tuint64_t time_out;\n };\n \n-struct crypto_adpter_info {\n+struct crypto_adapter_info {\n \tbool enabled;\n \t/**< Set if queue pair is added to crypto adapter */\n \tstruct rte_mempool *req_mp;\n@@ -85,7 +85,7 @@ struct cnxk_cpt_qp {\n \t/**< Metabuf info required to support operations on the queue pair */\n \tstruct roc_cpt_lmtline lmtline;\n \t/**< Lmtline information */\n-\tstruct crypto_adpter_info ca;\n+\tstruct crypto_adapter_info ca;\n \t/**< Crypto adapter related info */\n };\n \ndiff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c\nindex a5b05237..e5e554fd 100644\n--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c\n+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c\n@@ -281,7 +281,7 @@ build_proto_fd(dpaa2_sec_session *sess,\n #endif\n \n static inline int\n-build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,\n+build_authentic_gcm_sg_fd(dpaa2_sec_session *sess,\n \t\t struct rte_crypto_op *op,\n \t\t struct qbman_fd *fd, __rte_unused uint16_t bpid)\n {\n@@ -426,7 +426,7 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,\n }\n \n static inline int\n-build_authenc_gcm_fd(dpaa2_sec_session *sess,\n+build_authentic_gcm_fd(dpaa2_sec_session *sess,\n \t\t     struct rte_crypto_op *op,\n \t\t     struct qbman_fd *fd, uint16_t bpid)\n {\n@@ -448,7 +448,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,\n \n \t/* TODO we are using the first FLE entry to store Mbuf and session ctxt.\n \t * Currently we donot know which FLE has the mbuf stored.\n-\t * So while retreiving we can go back 1 FLE from the FD -ADDR\n+\t * So while retrieving we can go back 1 FLE from the FD -ADDR\n \t * to get the MBUF Addr from the previous FLE.\n \t * We can have a better approach to use the inline Mbuf\n \t */\n@@ -566,7 +566,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,\n }\n \n static inline int\n-build_authenc_sg_fd(dpaa2_sec_session *sess,\n+build_authentic_sg_fd(dpaa2_sec_session *sess,\n \t\t struct rte_crypto_op *op,\n \t\t struct qbman_fd *fd, __rte_unused uint16_t bpid)\n {\n@@ -713,7 +713,7 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,\n }\n \n static inline int\n-build_authenc_fd(dpaa2_sec_session *sess,\n+build_authentic_fd(dpaa2_sec_session *sess,\n \t\t struct rte_crypto_op *op,\n \t\t struct qbman_fd *fd, uint16_t bpid)\n {\n@@ -740,7 +740,7 @@ build_authenc_fd(dpaa2_sec_session *sess,\n \n 
\t/* we are using the first FLE entry to store Mbuf.\n \t * Currently we donot know which FLE has the mbuf stored.\n-\t * So while retreiving we can go back 1 FLE from the FD -ADDR\n+\t * So while retrieving we can go back 1 FLE from the FD -ADDR\n \t * to get the MBUF Addr from the previous FLE.\n \t * We can have a better approach to use the inline Mbuf\n \t */\n@@ -1009,7 +1009,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,\n \tmemset(fle, 0, FLE_POOL_BUF_SIZE);\n \t/* TODO we are using the first FLE entry to store Mbuf.\n \t * Currently we donot know which FLE has the mbuf stored.\n-\t * So while retreiving we can go back 1 FLE from the FD -ADDR\n+\t * So while retrieving we can go back 1 FLE from the FD -ADDR\n \t * to get the MBUF Addr from the previous FLE.\n \t * We can have a better approach to use the inline Mbuf\n \t */\n@@ -1262,7 +1262,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,\n \tmemset(fle, 0, FLE_POOL_BUF_SIZE);\n \t/* TODO we are using the first FLE entry to store Mbuf.\n \t * Currently we donot know which FLE has the mbuf stored.\n-\t * So while retreiving we can go back 1 FLE from the FD -ADDR\n+\t * So while retrieving we can go back 1 FLE from the FD -ADDR\n \t * to get the MBUF Addr from the previous FLE.\n \t * We can have a better approach to use the inline Mbuf\n \t */\n@@ -1372,10 +1372,10 @@ build_sec_fd(struct rte_crypto_op *op,\n \t\t\tret = build_auth_sg_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n \t\tcase DPAA2_SEC_AEAD:\n-\t\t\tret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);\n+\t\t\tret = build_authentic_gcm_sg_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n \t\tcase DPAA2_SEC_CIPHER_HASH:\n-\t\t\tret = build_authenc_sg_fd(sess, op, fd, bpid);\n+\t\t\tret = build_authentic_sg_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n #ifdef RTE_LIB_SECURITY\n \t\tcase DPAA2_SEC_IPSEC:\n@@ -1396,10 +1396,10 @@ build_sec_fd(struct rte_crypto_op *op,\n \t\t\tret = build_auth_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n \t\tcase DPAA2_SEC_AEAD:\n-\t\t\tret = build_authenc_gcm_fd(sess, op, fd, bpid);\n+\t\t\tret = build_authentic_gcm_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n \t\tcase DPAA2_SEC_CIPHER_HASH:\n-\t\t\tret = build_authenc_fd(sess, op, fd, bpid);\n+\t\t\tret = build_authentic_fd(sess, op, fd, bpid);\n \t\t\tbreak;\n #ifdef RTE_LIB_SECURITY\n \t\tcase DPAA2_SEC_IPSEC:\n@@ -1568,7 +1568,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)\n \n \t/* we are using the first FLE entry to store Mbuf.\n \t * Currently we donot know which FLE has the mbuf stored.\n-\t * So while retreiving we can go back 1 FLE from the FD -ADDR\n+\t * So while retrieving we can go back 1 FLE from the FD -ADDR\n \t * to get the MBUF Addr from the previous FLE.\n \t * We can have a better approach to use the inline Mbuf\n \t */\n@@ -1580,7 +1580,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)\n \t}\n \top = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));\n \n-\t/* Prefeth op */\n+\t/* Prefetch op */\n \tsrc = op->sym->m_src;\n \trte_prefetch0(src);\n \n@@ -2525,7 +2525,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,\n \tpriv->flc_desc[0].desc[2] = 0;\n \n \tif (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {\n-\t\tbufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,\n+\t\tbufsize = cnstr_shdsc_authentic(priv->flc_desc[0].desc, 1,\n \t\t\t\t\t      0, SHR_SERIAL,\n \t\t\t\t\t      &cipherdata, &authdata,\n \t\t\t\t\t      session->iv.length,\ndiff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c\nindex 
a552e645..0d500919 100644\n--- a/drivers/crypto/dpaa_sec/dpaa_sec.c\n+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c\n@@ -628,7 +628,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)\n \t\t/* Auth_only_len is set as 0 here and it will be\n \t\t * overwritten in fd for each packet.\n \t\t */\n-\t\tshared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,\n+\t\tshared_desc_len = cnstr_shdsc_authentic(cdb->sh_desc,\n \t\t\t\ttrue, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,\n \t\t\t\tses->iv.length,\n \t\t\t\tses->digest_length, ses->dir);\n@@ -723,7 +723,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)\n \t\t}\n \t\tops[pkts++] = op;\n \n-\t\t/* report op status to sym->op and then free the ctx memeory */\n+\t\t/* report op status to sym->op and then free the ctx memory */\n \t\trte_mempool_put(ctx->ctx_pool, (void *)ctx);\n \n \t\tqman_dqrr_consume(fq, dq);\ndiff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c\nindex 20b28833..27604459 100644\n--- a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c\n+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c\n@@ -296,7 +296,7 @@ cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)\n \t/* CPT VF device initialization */\n \totx_cpt_vfvq_init(cptvf);\n \n-\t/* Send msg to PF to assign currnet Q to required group */\n+\t/* Send msg to PF to assign current Q to required group */\n \tcptvf->vfgrp = group;\n \terr = otx_cpt_send_vf_grp_msg(cptvf, group);\n \tif (err) {\ndiff --git a/drivers/crypto/octeontx/otx_cryptodev_mbox.h b/drivers/crypto/octeontx/otx_cryptodev_mbox.h\nindex 508f3afd..c1eedc1b 100644\n--- a/drivers/crypto/octeontx/otx_cryptodev_mbox.h\n+++ b/drivers/crypto/octeontx/otx_cryptodev_mbox.h\n@@ -70,7 +70,7 @@ void\n otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);\n \n /*\n- * Checks if VF is able to comminicate with PF\n+ * Checks if VF is able to communicate with PF\n  * and also gets the CPT number this VF is associated to.\n  */\n int\ndiff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c\nindex 9e8fd495..f7ca8a8a 100644\n--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c\n+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c\n@@ -558,7 +558,7 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,\n \t\t\t\t\t &mdata, (void **)&prep_req);\n \n \tif (unlikely(ret)) {\n-\t\tCPT_LOG_DP_ERR(\"prep cryto req : op %p, cpt_op 0x%x \"\n+\t\tCPT_LOG_DP_ERR(\"prep crypto req : op %p, cpt_op 0x%x \"\n \t\t\t       \"ret 0x%x\", op, (unsigned int)cpt_op, ret);\n \t\treturn NULL;\n \t}\ndiff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c\nindex f8935080..09d8761c 100644\n--- a/drivers/crypto/qat/qat_asym.c\n+++ b/drivers/crypto/qat/qat_asym.c\n@@ -109,7 +109,7 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,\n static int qat_asym_check_nonzero(rte_crypto_param n)\n {\n \tif (n.length < 8) {\n-\t\t/* Not a case for any cryptograpic function except for DH\n+\t\t/* Not a case for any cryptographic function except for DH\n \t\t * generator which very often can be of one byte length\n \t\t */\n \t\tsize_t i;\ndiff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c\nindex 93b25752..00ec7037 100644\n--- a/drivers/crypto/qat/qat_sym.c\n+++ b/drivers/crypto/qat/qat_sym.c\n@@ -419,7 +419,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,\n \t\t\t\tICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {\n \n \t\t\t/* In case of AES-CCM this may point to user selected\n-\t\t\t * memory or 
iv offset in cypto_op\n+\t\t\t * memory or iv offset in crypto_op\n \t\t\t */\n \t\t\tuint8_t *aad_data = op->sym->aead.aad.data;\n \t\t\t/* This is true AAD length, it not includes 18 bytes of\ndiff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h\nindex 6ebc1767..21afb90c 100644\n--- a/drivers/crypto/qat/qat_sym_session.h\n+++ b/drivers/crypto/qat/qat_sym_session.h\n@@ -142,7 +142,7 @@ unsigned int\n qat_sym_session_get_private_size(struct rte_cryptodev *dev);\n \n void\n-qat_sym_sesssion_init_common_hdr(struct qat_sym_session *session,\n+qat_sym_session_init_common_hdr(struct qat_sym_session *session,\n \t\t\t\t\tstruct icp_qat_fw_comn_req_hdr *header,\n \t\t\t\t\tenum qat_sym_proto_flag proto_flags);\n int\ndiff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c\nindex ed648667..ce23e38b 100644\n--- a/drivers/crypto/virtio/virtio_cryptodev.c\n+++ b/drivers/crypto/virtio/virtio_cryptodev.c\n@@ -862,7 +862,7 @@ virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)\n \t\tVIRTIO_CRYPTO_INIT_LOG_DBG(\"queue_pairs[%d]=%p\",\n \t\t\t\ti, dev->data->queue_pairs[i]);\n \n-\t\tvirtqueue_detatch_unused(dev->data->queue_pairs[i]);\n+\t\tvirtqueue_detach_unused(dev->data->queue_pairs[i]);\n \n \t\tVIRTIO_CRYPTO_INIT_LOG_DBG(\"After freeing dataq[%d] used and \"\n \t\t\t\t\t\"unused buf\", i);\n@@ -1205,7 +1205,7 @@ virtio_crypto_sym_pad_auth_param(\n static int\n virtio_crypto_sym_pad_op_ctrl_req(\n \t\tstruct virtio_crypto_op_ctrl_req *ctrl,\n-\t\tstruct rte_crypto_sym_xform *xform, bool is_chainned,\n+\t\tstruct rte_crypto_sym_xform *xform, bool is_chained,\n \t\tuint8_t *cipher_key_data, uint8_t *auth_key_data,\n \t\tstruct virtio_crypto_session *session)\n {\n@@ -1228,7 +1228,7 @@ virtio_crypto_sym_pad_op_ctrl_req(\n \t\t\t\tVIRTIO_CRYPTO_MAX_IV_SIZE);\n \t\t\treturn -1;\n \t\t}\n-\t\tif (is_chainned)\n+\t\tif (is_chained)\n \t\t\tret = virtio_crypto_sym_pad_cipher_param(\n \t\t\t\t&ctrl->u.sym_create_session.u.chain.para\n \t\t\t\t\t\t.cipher_param, cipher_xform);\ndiff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c\nindex fd8be581..33985d1d 100644\n--- a/drivers/crypto/virtio/virtqueue.c\n+++ b/drivers/crypto/virtio/virtqueue.c\n@@ -22,7 +22,7 @@ virtqueue_disable_intr(struct virtqueue *vq)\n }\n \n void\n-virtqueue_detatch_unused(struct virtqueue *vq)\n+virtqueue_detach_unused(struct virtqueue *vq)\n {\n \tstruct rte_crypto_op *cop = NULL;\n \ndiff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h\nindex bf10c657..1a67b408 100644\n--- a/drivers/crypto/virtio/virtqueue.h\n+++ b/drivers/crypto/virtio/virtqueue.h\n@@ -99,7 +99,7 @@ void virtqueue_disable_intr(struct virtqueue *vq);\n /**\n  *  Get all mbufs to be freed.\n  */\n-void virtqueue_detatch_unused(struct virtqueue *vq);\n+void virtqueue_detach_unused(struct virtqueue *vq);\n \n static inline int\n virtqueue_full(const struct virtqueue *vq)\n@@ -145,7 +145,7 @@ virtqueue_notify(struct virtqueue *vq)\n {\n \t/*\n \t * Ensure updated avail->idx is visible to host.\n-\t * For virtio on IA, the notificaiton is through io port operation\n+\t * For virtio on IA, the notification is through io port operation\n \t * which is a serialization instruction itself.\n \t */\n \tVTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);\ndiff --git a/drivers/dma/ioat/ioat_dmadev.c b/drivers/dma/ioat/ioat_dmadev.c\nindex a230496b..533f7231 100644\n--- a/drivers/dma/ioat/ioat_dmadev.c\n+++ b/drivers/dma/ioat/ioat_dmadev.c\n@@ 
-624,7 +624,7 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev)\n \tioat = dmadev->data->dev_private;\n \tioat->dmadev = dmadev;\n \tioat->regs = dev->mem_resource[0].addr;\n-\tioat->doorbell = &ioat->regs->dmacount;\n+\tioat->doorbell = &ioat->regs->dmaccount;\n \tioat->qcfg.nb_desc = 0;\n \tioat->desc_ring = NULL;\n \tioat->version = ioat->regs->cbver;\ndiff --git a/drivers/dma/ioat/ioat_hw_defs.h b/drivers/dma/ioat/ioat_hw_defs.h\nindex dc3493a7..88bf09a7 100644\n--- a/drivers/dma/ioat/ioat_hw_defs.h\n+++ b/drivers/dma/ioat/ioat_hw_defs.h\n@@ -68,7 +68,7 @@ struct ioat_registers {\n \tuint8_t\t\treserved6[0x2];\t/* 0x82 */\n \tuint8_t\t\tchancmd;\t/* 0x84 */\n \tuint8_t\t\treserved3[1];\t/* 0x85 */\n-\tuint16_t\tdmacount;\t/* 0x86 */\n+\tuint16_t\tdmaccount;\t/* 0x86 */\n \tuint64_t\tchansts;\t/* 0x88 */\n \tuint64_t\tchainaddr;\t/* 0x90 */\n \tuint64_t\tchancmp;\t/* 0x98 */\ndiff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c\nindex d9e4f731..81cbdd28 100644\n--- a/drivers/dma/skeleton/skeleton_dmadev.c\n+++ b/drivers/dma/skeleton/skeleton_dmadev.c\n@@ -169,7 +169,7 @@ vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)\n \tstruct rte_ring *completed;\n \tuint16_t i;\n \n-\tdesc = rte_zmalloc_socket(\"dma_skelteon_desc\",\n+\tdesc = rte_zmalloc_socket(\"dma_skeleton_desc\",\n \t\t\t\t  nb_desc * sizeof(struct skeldma_desc),\n \t\t\t\t  RTE_CACHE_LINE_SIZE, hw->socket_id);\n \tif (desc == NULL) {\ndiff --git a/drivers/event/cnxk/cnxk_eventdev_selftest.c b/drivers/event/cnxk/cnxk_eventdev_selftest.c\nindex 69c15b1d..2fe6467f 100644\n--- a/drivers/event/cnxk/cnxk_eventdev_selftest.c\n+++ b/drivers/event/cnxk/cnxk_eventdev_selftest.c\n@@ -140,7 +140,7 @@ _eventdev_setup(int mode)\n \tstruct rte_event_dev_info info;\n \tint i, ret;\n \n-\t/* Create and destrory pool for each test case to make it standalone */\n+\t/* Create and destroy pool for each test case to make it standalone */\n \teventdev_test_mempool = rte_pktmbuf_pool_create(\n \t\tpool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());\n \tif (!eventdev_test_mempool) {\n@@ -1543,7 +1543,7 @@ cnxk_sso_selftest(const char *dev_name)\n \t\tcn9k_sso_set_rsrc(dev);\n \t\tif (cnxk_sso_testsuite_run(dev_name))\n \t\t\treturn rc;\n-\t\t/* Verift dual ws mode. */\n+\t\t/* Verify dual ws mode. 
*/\n \t\tprintf(\"Verifying CN9K Dual workslot mode\\n\");\n \t\tdev->dual_ws = 1;\n \t\tcn9k_sso_set_rsrc(dev);\ndiff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c\nindex 16e9764d..d75f12e3 100644\n--- a/drivers/event/dlb2/dlb2.c\n+++ b/drivers/event/dlb2/dlb2.c\n@@ -2145,7 +2145,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,\n \t}\n \n \t/* This is expected with eventdev API!\n-\t * It blindly attemmpts to unmap all queues.\n+\t * It blindly attempts to unmap all queues.\n \t */\n \tif (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {\n \t\tDLB2_LOG_DBG(\"dlb2: ignoring LB QID %d not mapped for qm_port %d.\\n\",\ndiff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h\nindex a5e2f8e4..7837ae87 100644\n--- a/drivers/event/dlb2/dlb2_priv.h\n+++ b/drivers/event/dlb2/dlb2_priv.h\n@@ -519,7 +519,7 @@ struct dlb2_eventdev_port {\n \tbool setup_done;\n \t/* enq_configured is set when the qm port is created */\n \tbool enq_configured;\n-\tuint8_t implicit_release; /* release events before dequeueing */\n+\tuint8_t implicit_release; /* release events before dequeuing */\n }  __rte_cache_aligned;\n \n struct dlb2_queue {\ndiff --git a/drivers/event/dlb2/dlb2_selftest.c b/drivers/event/dlb2/dlb2_selftest.c\nindex 2113bc2c..1863ffe0 100644\n--- a/drivers/event/dlb2/dlb2_selftest.c\n+++ b/drivers/event/dlb2/dlb2_selftest.c\n@@ -223,7 +223,7 @@ test_stop_flush(struct test *t) /* test to check we can properly flush events */\n \t\t\t\t    0,\n \t\t\t\t    RTE_EVENT_PORT_ATTR_DEQ_DEPTH,\n \t\t\t\t    &dequeue_depth)) {\n-\t\tprintf(\"%d: Error retrieveing dequeue depth\\n\", __LINE__);\n+\t\tprintf(\"%d: Error retrieving dequeue depth\\n\", __LINE__);\n \t\tgoto err;\n \t}\n \ndiff --git a/drivers/event/dlb2/rte_pmd_dlb2.h b/drivers/event/dlb2/rte_pmd_dlb2.h\nindex 74399db0..1dbd885a 100644\n--- a/drivers/event/dlb2/rte_pmd_dlb2.h\n+++ b/drivers/event/dlb2/rte_pmd_dlb2.h\n@@ -24,7 +24,7 @@ extern \"C\" {\n  * Selects the token pop mode for a DLB2 port.\n  */\n enum dlb2_token_pop_mode {\n-\t/* Pop the CQ tokens immediately after dequeueing. */\n+\t/* Pop the CQ tokens immediately after dequeuing. */\n \tAUTO_POP,\n \t/* Pop CQ tokens after (dequeue_depth - 1) events are released.\n \t * Supported on load-balanced ports only.\ndiff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c\nindex bbbd2095..b549bdfc 100644\n--- a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c\n+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c\n@@ -118,7 +118,7 @@ _eventdev_setup(int mode)\n \tstruct rte_event_dev_info info;\n \tconst char *pool_name = \"evdev_dpaa2_test_pool\";\n \n-\t/* Create and destrory pool for each test case to make it standalone */\n+\t/* Create and destroy pool for each test case to make it standalone */\n \teventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,\n \t\t\t\t\tMAX_EVENTS,\n \t\t\t\t\t0 /*MBUF_CACHE_SIZE*/,\ndiff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h\nindex e64ae26f..c907c00c 100644\n--- a/drivers/event/dsw/dsw_evdev.h\n+++ b/drivers/event/dsw/dsw_evdev.h\n@@ -24,7 +24,7 @@\n /* Multiple 24-bit flow ids will map to the same DSW-level flow. The\n  * number of DSW flows should be high enough make it unlikely that\n  * flow ids of several large flows hash to the same DSW-level flow.\n- * Such collisions will limit parallism and thus the number of cores\n+ * Such collisions will limit parallelism and thus the number of cores\n  * that may be utilized. 
However, configuring a large number of DSW\n  * flows might potentially, depending on traffic and actual\n  * application flow id value range, result in each such DSW-level flow\n@@ -104,7 +104,7 @@\n /* Only one outstanding migration per port is allowed */\n #define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)\n \n-/* Enough room for paus request/confirm and unpaus request/confirm for\n+/* Enough room for pause request/confirm and unpause request/confirm for\n  * all possible senders.\n  */\n #define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)\ndiff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c\nindex c6ed4702..e209cd5b 100644\n--- a/drivers/event/dsw/dsw_event.c\n+++ b/drivers/event/dsw/dsw_event.c\n@@ -1096,7 +1096,7 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)\n static void\n dsw_port_note_op(struct dsw_port *port, uint16_t num_events)\n {\n-\t/* To pull the control ring reasonbly often on busy ports,\n+\t/* To pull the control ring reasonably often on busy ports,\n \t * each dequeued/enqueued event is considered an 'op' too.\n \t */\n \tport->ops_since_bg_task += (num_events+1);\n@@ -1180,7 +1180,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,\n \t * addition, a port cannot be left \"unattended\" (e.g. unused)\n \t * for long periods of time, since that would stall\n \t * migration. Eventdev API extensions to provide a cleaner way\n-\t * to archieve both of these functions should be\n+\t * to achieve both of these functions should be\n \t * considered.\n \t */\n \tif (unlikely(events_len == 0)) {\ndiff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h\nindex bb1056a9..e46dc055 100644\n--- a/drivers/event/octeontx/ssovf_evdev.h\n+++ b/drivers/event/octeontx/ssovf_evdev.h\n@@ -88,7 +88,7 @@\n \n /*\n  * In Cavium OCTEON TX SoC, all accesses to the device registers are\n- * implictly strongly ordered. So, The relaxed version of IO operation is\n+ * implicitly strongly ordered. 
So, The relaxed version of IO operation is\n  * safe to use with out any IO memory barriers.\n  */\n #define ssovf_read64 rte_read64_relaxed\ndiff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c\nindex d7b0d221..b5552363 100644\n--- a/drivers/event/octeontx/ssovf_evdev_selftest.c\n+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c\n@@ -151,7 +151,7 @@ _eventdev_setup(int mode)\n \tstruct rte_event_dev_info info;\n \tconst char *pool_name = \"evdev_octeontx_test_pool\";\n \n-\t/* Create and destrory pool for each test case to make it standalone */\n+\t/* Create and destroy pool for each test case to make it standalone */\n \teventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,\n \t\t\t\t\tMAX_EVENTS,\n \t\t\t\t\t0 /*MBUF_CACHE_SIZE*/,\ndiff --git a/drivers/event/octeontx2/otx2_evdev_selftest.c b/drivers/event/octeontx2/otx2_evdev_selftest.c\nindex 48bfaf89..a89637d6 100644\n--- a/drivers/event/octeontx2/otx2_evdev_selftest.c\n+++ b/drivers/event/octeontx2/otx2_evdev_selftest.c\n@@ -139,7 +139,7 @@ _eventdev_setup(int mode)\n \tstruct rte_event_dev_info info;\n \tint i, ret;\n \n-\t/* Create and destrory pool for each test case to make it standalone */\n+\t/* Create and destroy pool for each test case to make it standalone */\n \teventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,\n \t\t\t\t\t\t\t0, 0, 512,\n \t\t\t\t\t\t\trte_socket_id());\ndiff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c\nindex 6da8b14b..440b713a 100644\n--- a/drivers/event/octeontx2/otx2_tim_evdev.c\n+++ b/drivers/event/octeontx2/otx2_tim_evdev.c\n@@ -145,7 +145,7 @@ tim_err_desc(int rc)\n {\n \tswitch (rc) {\n \tcase TIM_AF_NO_RINGS_LEFT:\n-\t\totx2_err(\"Unable to allocat new TIM ring.\");\n+\t\totx2_err(\"Unable to allocate new TIM ring.\");\n \t\tbreak;\n \tcase TIM_AF_INVALID_NPA_PF_FUNC:\n \t\totx2_err(\"Invalid NPA pf func.\");\n@@ -189,7 +189,7 @@ tim_err_desc(int rc)\n \tcase TIM_AF_INVALID_ENABLE_DONTFREE:\n \t\totx2_err(\"Invalid Don't free value.\");\n \t\tbreak;\n-\tcase TIM_AF_ENA_DONTFRE_NSET_PERIODIC:\n+\tcase TIM_AF_ENA_DONTFREE_NSET_PERIODIC:\n \t\totx2_err(\"Don't free bit not set when periodic is enabled.\");\n \t\tbreak;\n \tcase TIM_AF_RING_ALREADY_DISABLED:\ndiff --git a/drivers/event/octeontx2/otx2_worker_dual.h b/drivers/event/octeontx2/otx2_worker_dual.h\nindex 36ae4dd8..ca06d51c 100644\n--- a/drivers/event/octeontx2/otx2_worker_dual.h\n+++ b/drivers/event/octeontx2/otx2_worker_dual.h\n@@ -74,7 +74,7 @@ otx2_ssogws_dual_get_work(struct otx2_ssogws_state *ws,\n \t\t\t\t\t event.flow_id, flags, lookup_mem);\n \t\t\t/* Extracting tstamp, if PTP enabled. 
CGX will prepend\n \t\t\t * the timestamp at starting of packet data and it can\n-\t\t\t * be derieved from WQE 9 dword which corresponds to SG\n+\t\t\t * be derived from WQE 9 dword which corresponds to SG\n \t\t\t * iova.\n \t\t\t * rte_pktmbuf_mtod_offset can be used for this purpose\n \t\t\t * but it brings down the performance as it reads\ndiff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c\nindex 15c10240..8b6890b2 100644\n--- a/drivers/event/opdl/opdl_evdev.c\n+++ b/drivers/event/opdl/opdl_evdev.c\n@@ -703,7 +703,7 @@ opdl_probe(struct rte_vdev_device *vdev)\n \t}\n \n \tPMD_DRV_LOG(INFO, \"DEV_ID:[%02d] : \"\n-\t\t      \"Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]\"\n+\t\t      \"Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]\"\n \t\t\t  \" , self_test:[%s]\\n\",\n \t\t      dev->data->dev_id,\n \t\t      name,\ndiff --git a/drivers/event/opdl/opdl_test.c b/drivers/event/opdl/opdl_test.c\nindex e4fc70a4..24b92df4 100644\n--- a/drivers/event/opdl/opdl_test.c\n+++ b/drivers/event/opdl/opdl_test.c\n@@ -864,7 +864,7 @@ qid_basic(struct test *t)\n \t}\n \n \n-\t/* Start the devicea */\n+\t/* Start the device */\n \tif (!err) {\n \t\tif (rte_event_dev_start(evdev) < 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"%s:%d: Error with start call\\n\",\ndiff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h\nindex 33645bd1..4fd10544 100644\n--- a/drivers/event/sw/sw_evdev.h\n+++ b/drivers/event/sw/sw_evdev.h\n@@ -180,7 +180,7 @@ struct sw_port {\n \tuint16_t outstanding_releases __rte_cache_aligned;\n \tuint16_t inflight_max; /* app requested max inflights for this port */\n \tuint16_t inflight_credits; /* num credits this port has right now */\n-\tuint8_t implicit_release; /* release events before dequeueing */\n+\tuint8_t implicit_release; /* release events before dequeuing */\n \n \tuint16_t last_dequeue_burst_sz; /* how big the burst was */\n \tuint64_t last_dequeue_ticks; /* used to track burst processing time */\ndiff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c\nindex 9768d3a0..cb97a4d6 100644\n--- a/drivers/event/sw/sw_evdev_selftest.c\n+++ b/drivers/event/sw/sw_evdev_selftest.c\n@@ -1109,7 +1109,7 @@ xstats_tests(struct test *t)\n \t\t\t\t\tNULL,\n \t\t\t\t\t0);\n \n-\t/* Verify that the resetable stats are reset, and others are not */\n+\t/* Verify that the resettable stats are reset, and others are not */\n \tstatic const uint64_t queue_expected_zero[] = {\n \t\t0 /* rx */,\n \t\t0 /* tx */,\ndiff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c\nindex f17aff96..32639a3b 100644\n--- a/drivers/mempool/dpaa/dpaa_mempool.c\n+++ b/drivers/mempool/dpaa/dpaa_mempool.c\n@@ -258,7 +258,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,\n \t\t}\n \t\t/* assigning mbuf from the acquired objects */\n \t\tfor (i = 0; (i < ret) && bufs[i].addr; i++) {\n-\t\t\t/* TODO-errata - objerved that bufs may be null\n+\t\t\t/* TODO-errata - observed that bufs may be null\n \t\t\t * i.e. 
first buffer is valid, remaining 6 buffers\n \t\t\t * may be null.\n \t\t\t */\ndiff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c\nindex 94dc5cd8..8fd9edce 100644\n--- a/drivers/mempool/octeontx/octeontx_fpavf.c\n+++ b/drivers/mempool/octeontx/octeontx_fpavf.c\n@@ -669,7 +669,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)\n \t\t\tbreak;\n \t\t}\n \n-\t\t/* Imsert it into an ordered linked list */\n+\t\t/* Insert it into an ordered linked list */\n \t\tfor (curr = &head; curr[0] != NULL; curr = curr[0]) {\n \t\t\tif ((uintptr_t)node <= (uintptr_t)curr[0])\n \t\t\t\tbreak;\n@@ -705,7 +705,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)\n \n \tret = octeontx_fpapf_aura_detach(gpool);\n \tif (ret) {\n-\t\tfpavf_log_err(\"Failed to dettach gaura %u. error code=%d\\n\",\n+\t\tfpavf_log_err(\"Failed to detach gaura %u. error code=%d\\n\",\n \t\t\t      gpool, ret);\n \t}\n \ndiff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c\nindex b618cba3..1d4b5e1c 100644\n--- a/drivers/net/ark/ark_ethdev.c\n+++ b/drivers/net/ark/ark_ethdev.c\n@@ -309,7 +309,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)\n \t\treturn -1;\n \t}\n \tif (ark->sysctrl.t32[3] != 0) {\n-\t\tif (ark_rqp_lasped(ark->rqpacing)) {\n+\t\tif (ark_rqp_lapsed(ark->rqpacing)) {\n \t\t\tARK_PMD_LOG(ERR, \"Arkville Evaluation System - \"\n \t\t\t\t    \"Timer has Expired\\n\");\n \t\t\treturn -1;\n@@ -565,7 +565,7 @@ eth_ark_dev_start(struct rte_eth_dev *dev)\n \tif (ark->start_pg && (dev->data->port_id == 0)) {\n \t\tpthread_t thread;\n \n-\t\t/* Delay packet generatpr start allow the hardware to be ready\n+\t\t/* Delay packet generator start to allow the hardware to be ready\n \t\t * This is only used for sanity checking with internal generator\n \t\t */\n \t\tif (rte_ctrl_thread_create(&thread, \"ark-delay-pg\", NULL,\ndiff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h\nindex 6f9b3013..49193ac5 100644\n--- a/drivers/net/ark/ark_global.h\n+++ b/drivers/net/ark/ark_global.h\n@@ -67,7 +67,7 @@\n typedef void (*rx_user_meta_hook_fn)(struct rte_mbuf *mbuf,\n \t\t\t\t     const uint32_t *meta,\n \t\t\t\t     void *ext_user_data);\n-/* TX hook poplulate *meta, with up to 20 bytes.  meta_cnt\n+/* TX hook populates *meta, with up to 20 bytes.  
meta_cnt\n  * returns the number of uint32_t words populated, 0 to 5\n  */\n typedef void (*tx_user_meta_hook_fn)(const struct rte_mbuf *mbuf,\ndiff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c\nindex ef9ccd07..1193a462 100644\n--- a/drivers/net/ark/ark_rqp.c\n+++ b/drivers/net/ark/ark_rqp.c\n@@ -62,7 +62,7 @@ ark_rqp_dump(struct ark_rqpace_t *rqp)\n }\n \n int\n-ark_rqp_lasped(struct ark_rqpace_t *rqp)\n+ark_rqp_lapsed(struct ark_rqpace_t *rqp)\n {\n-\treturn rqp->lasped;\n+\treturn rqp->lapsed;\n }\ndiff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h\nindex 6c804606..fc9c5b57 100644\n--- a/drivers/net/ark/ark_rqp.h\n+++ b/drivers/net/ark/ark_rqp.h\n@@ -48,10 +48,10 @@ struct ark_rqpace_t {\n \tvolatile uint32_t cpld_pending_max;\n \tvolatile uint32_t err_count_other;\n \tchar eval[4];\n-\tvolatile int lasped;\n+\tvolatile int lapsed;\n };\n \n void ark_rqp_dump(struct ark_rqpace_t *rqp);\n void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);\n-int ark_rqp_lasped(struct ark_rqpace_t *rqp);\n+int ark_rqp_lapsed(struct ark_rqpace_t *rqp);\n #endif\ndiff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c\nindex 1c03e8bf..3a028f42 100644\n--- a/drivers/net/atlantic/atl_ethdev.c\n+++ b/drivers/net/atlantic/atl_ethdev.c\n@@ -1423,7 +1423,7 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\ndiff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c\nindex e3f57ded..aeb79bf5 100644\n--- a/drivers/net/atlantic/atl_rxtx.c\n+++ b/drivers/net/atlantic/atl_rxtx.c\n@@ -1094,7 +1094,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\ndiff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c\nindex 7d0e7240..d0eb4af9 100644\n--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.c\n+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c\n@@ -281,7 +281,7 @@ int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)\n \thw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);\n \thw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);\n \n-\t/* VLAN proimisc bu defauld */\n+\t/* VLAN promisc by default */\n \thw_atl_rpf_vlan_prom_mode_en_set(self, 1);\n \n \t/* Rx Interrupts */\ndiff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c\nindex daeb3308..2226b4f6 100644\n--- a/drivers/net/axgbe/axgbe_dev.c\n+++ b/drivers/net/axgbe/axgbe_dev.c\n@@ -1046,7 +1046,7 @@ static int axgbe_config_rx_threshold(struct axgbe_port *pdata,\n \treturn 0;\n }\n \n-/*Distrubting fifo size  */\n+/*Distributing fifo size  */\n static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)\n {\n \tunsigned int fifo_size;\ndiff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c\nindex 7d40c18a..76e12d12 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.c\n+++ b/drivers/net/axgbe/axgbe_ethdev.c\n@@ -284,7 +284,7 @@ static int axgbe_phy_reset(struct axgbe_port 
*pdata)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\ndiff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h\nindex a207f2ae..e06d40f9 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.h\n+++ b/drivers/net/axgbe/axgbe_ethdev.h\n@@ -641,7 +641,7 @@ struct axgbe_port {\n \n \tunsigned int kr_redrv;\n \n-\t/* Auto-negotiation atate machine support */\n+\t/* Auto-negotiation state machine support */\n \tunsigned int an_int;\n \tunsigned int an_status;\n \tenum axgbe_an an_result;\ndiff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c\nindex 02236ec1..c114550a 100644\n--- a/drivers/net/axgbe/axgbe_phy_impl.c\n+++ b/drivers/net/axgbe/axgbe_phy_impl.c\n@@ -347,7 +347,7 @@ static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,\n \n \tretry = 1;\n again2:\n-\t/* Read the specfied register */\n+\t/* Read the specified register */\n \ti2c_op.cmd = AXGBE_I2C_CMD_READ;\n \ti2c_op.target = target;\n \ti2c_op.len = val_len;\n@@ -1093,7 +1093,7 @@ static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)\n {\n \treturn 0;\n \t/* Dummy API since there is no case to support\n-\t * external phy devices registred through kerenl apis\n+\t * external phy devices registered through kernel apis\n \t */\n }\n \ndiff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c\nindex 816371cd..c1f7a3d0 100644\n--- a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c\n+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c\n@@ -11,7 +11,7 @@\n #include <rte_mempool.h>\n #include <rte_mbuf.h>\n \n-/* Useful to avoid shifting for every descriptor prepration*/\n+/* Useful to avoid shifting for every descriptor preparation */\n #define TX_DESC_CTRL_FLAGS 0xb000000000000000\n #define TX_DESC_CTRL_FLAG_TMST 0x40000000\n #define TX_FREE_BULK\t   8\ndiff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c\nindex f67db015..9179fdc8 100644\n--- a/drivers/net/bnx2x/bnx2x.c\n+++ b/drivers/net/bnx2x/bnx2x.c\n@@ -926,7 +926,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)\n  *   block.\n  *\n  * RAMROD_CMD_ID_ETH_UPDATE\n- *   Used to update the state of the leading connection, usually to udpate\n+ *   Used to update the state of the leading connection, usually to update\n  *   the RSS indirection table.  Completes on the RCQ of the leading\n  *   connection. (Not currently used under FreeBSD until OS support becomes\n  *   available.)\n@@ -941,7 +941,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)\n  *   the RCQ of the leading connection.\n  *\n  * RAMROD_CMD_ID_ETH_CFC_DEL\n- *   Used when tearing down a conneciton prior to driver unload.  Completes\n+ *   Used when tearing down a connection prior to driver unload.  Completes\n  *   on the RCQ of the leading connection (since the current connection\n  *   has been completely removed from controller memory).\n  *\n@@ -1072,7 +1072,7 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,\n \n \t/*\n \t * It's ok if the actual decrement is issued towards the memory\n-\t * somewhere between the lock and unlock. Thus no more explict\n+\t * somewhere between the lock and unlock. 
Thus no more explicit\n \t * memory barrier is needed.\n \t */\n \tif (common) {\n@@ -1190,7 +1190,7 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,\n \t\tbreak;\n \n \tcase (RAMROD_CMD_ID_ETH_TERMINATE):\n-\t\tPMD_DRV_LOG(DEBUG, sc, \"got MULTI[%d] teminate ramrod\", cid);\n+\t\tPMD_DRV_LOG(DEBUG, sc, \"got MULTI[%d] terminate ramrod\", cid);\n \t\tdrv_cmd = ECORE_Q_CMD_TERMINATE;\n \t\tbreak;\n \n@@ -1476,7 +1476,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,\n \tcase BNX2X_RX_MODE_ALLMULTI_PROMISC:\n \tcase BNX2X_RX_MODE_PROMISC:\n \t\t/*\n-\t\t * According to deffinition of SI mode, iface in promisc mode\n+\t\t * According to definition of SI mode, iface in promisc mode\n \t\t * should receive matched and unmatched (in resolution of port)\n \t\t * unicast packets.\n \t\t */\n@@ -1944,7 +1944,7 @@ static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)\n \n /*\n  * Cleans the object that have internal lists without sending\n- * ramrods. Should be run when interrutps are disabled.\n+ * ramrods. Should be run when interrupts are disabled.\n  */\n static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)\n {\n@@ -2043,7 +2043,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link\n \n \t/*\n \t * Nothing to do during unload if previous bnx2x_nic_load()\n-\t * did not completed successfully - all resourses are released.\n+\t * did not complete successfully - all resources are released.\n \t */\n \tif ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {\n \t\treturn 0;\n@@ -2084,7 +2084,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link\n \t\t/*\n \t\t * Prevent transactions to host from the functions on the\n \t\t * engine that doesn't reset global blocks in case of global\n-\t\t * attention once gloabl blocks are reset and gates are opened\n+\t\t * attention once global blocks are reset and gates are opened\n \t\t * (the engine which leader will perform the recovery\n \t\t * last).\n \t\t */\n@@ -2101,7 +2101,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link\n \n \t/*\n \t * At this stage no more interrupts will arrive so we may safely clean\n-\t * the queue'able objects here in case they failed to get cleaned so far.\n+\t * the queueable objects here in case they failed to get cleaned so far.\n \t */\n \tif (IS_PF(sc)) {\n \t\tbnx2x_squeeze_objects(sc);\n@@ -2151,7 +2151,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link\n }\n \n /*\n- * Encapsulte an mbuf cluster into the tx bd chain and makes the memory\n+ * Encapsulates an mbuf cluster into the tx bd chain and makes the memory\n  * visible to the controller.\n  *\n  * If an mbuf is submitted to this routine and cannot be given to the\n@@ -2719,7 +2719,7 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)\n \treturn val1 != 0;\n }\n \n-/* send load requrest to mcp and analyze response */\n+/* send load request to mcp and analyze response */\n static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)\n {\n \tPMD_INIT_FUNC_TRACE(sc);\n@@ -4031,17 +4031,17 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)\n \t\t}\n \t}\n \n-\tif (attn & HW_INTERRUT_ASSERT_SET_2) {\n+\tif (attn & HW_INTERRUPT_ASSERT_SET_2) {\n \t\treg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :\n \t\t\t      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);\n \n \t\tval = REG_RD(sc, reg_offset);\n-\t\tval &= ~(attn & HW_INTERRUT_ASSERT_SET_2);\n+\t\tval &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);\n \t\tREG_WR(sc, reg_offset, val);\n \n \t\tPMD_DRV_LOG(ERR, sc,\n \t\t\t    \"FATAL HW block attention set2 0x%x\",\n-\t\t\t    (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2));\n+\t\t\t    (uint32_t) (attn & HW_INTERRUPT_ASSERT_SET_2));\n \t\trte_panic(\"HW block attention set2\");\n \t}\n }\n@@ -4061,17 +4061,17 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)\n \t\t}\n \t}\n \n-\tif (attn & HW_INTERRUT_ASSERT_SET_1) {\n+\tif (attn & HW_INTERRUPT_ASSERT_SET_1) {\n \t\treg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :\n \t\t\t      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);\n \n \t\tval = REG_RD(sc, reg_offset);\n-\t\tval &= ~(attn & HW_INTERRUT_ASSERT_SET_1);\n+\t\tval &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);\n \t\tREG_WR(sc, reg_offset, val);\n \n \t\tPMD_DRV_LOG(ERR, sc,\n \t\t\t    \"FATAL HW block attention set1 0x%08x\",\n-\t\t\t    (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1));\n+\t\t\t    (uint32_t) (attn & HW_INTERRUPT_ASSERT_SET_1));\n \t\trte_panic(\"HW block attention set1\");\n \t}\n }\n@@ -4103,13 +4103,13 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)\n \t\tbnx2x_release_phy_lock(sc);\n \t}\n \n-\tif (attn & HW_INTERRUT_ASSERT_SET_0) {\n+\tif (attn & HW_INTERRUPT_ASSERT_SET_0) {\n \t\tval = REG_RD(sc, reg_offset);\n-\t\tval &= ~(attn & HW_INTERRUT_ASSERT_SET_0);\n+\t\tval &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);\n \t\tREG_WR(sc, reg_offset, val);\n \n \t\trte_panic(\"FATAL HW block attention set0 0x%lx\",\n-\t\t\t  (attn & (unsigned long)HW_INTERRUT_ASSERT_SET_0));\n+\t\t\t  (attn & (unsigned long)HW_INTERRUPT_ASSERT_SET_0));\n \t}\n }\n \n@@ -5325,7 +5325,7 @@ static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_param\n  *   sum of vn_min_rates.\n  *     or\n  *   0 - if all the min_rates are 0.\n- * In the later case fainess algorithm should be deactivated.\n+ * In the latter case the fairness algorithm should be deactivated.\n  * If all min rates are not zero then those that are zeroes will be set to 1.\n  */\n static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)\n@@ -6564,7 +6564,7 @@ bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,\n \ttxq_init->fw_sb_id = fp->fw_sb_id;\n \n \t/*\n-\t * set the TSS leading client id for TX classfication to the\n+\t * set the TSS leading client id for TX classification to the\n \t * leading RSS client id\n \t */\n \ttxq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);\n@@ -7634,8 +7634,8 @@ static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)\n }\n \n /*\n-* Walk the PCI capabiites list for the device to find what features are\n-* supported. These capabilites may be enabled/disabled by firmware so it's\n+* Walk the PCI capabilities list for the device to find what features are\n+* supported. 
These capabilities may be enabled/disabled by firmware so it's\n * best to walk the list rather than make assumptions.\n */\n static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)\n@@ -8425,7 +8425,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)\n \t} else {\n \t\tsc->devinfo.int_block = INT_BLOCK_IGU;\n \n-/* do not allow device reset during IGU info preocessing */\n+/* do not allow device reset during IGU info processing */\n \t\tbnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);\n \n \t\tval = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);\n@@ -9765,7 +9765,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)\n \n \tsc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;\n \n-\t/* get PCI capabilites */\n+\t/* get PCI capabilities */\n \tbnx2x_probe_pci_caps(sc);\n \n \tif (sc->devinfo.pcie_msix_cap_reg != 0) {\n@@ -10284,7 +10284,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)\n  *          stay set)\n  *      f.  If this is VNIC 3 of a port then also init\n  *          first_timers_ilt_entry to zero and last_timers_ilt_entry\n- *          to the last enrty in the ILT.\n+ *          to the last entry in the ILT.\n  *\n  *      Notes:\n  *      Currently the PF error in the PGLC is non recoverable.\n@@ -11090,7 +11090,7 @@ static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)\n /**\n  *\tbnx2x_pf_flr_clnup\n  *\ta. re-enable target read on the PF\n- *\tb. poll cfc per function usgae counter\n+ *\tb. poll cfc per function usage counter\n  *\tc. poll the qm perfunction usage counter\n  *\td. poll the tm per function usage counter\n  *\te. poll the tm per function scan-done indication\ndiff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h\nindex 80d19cbf..3f7d82c0 100644\n--- a/drivers/net/bnx2x/bnx2x.h\n+++ b/drivers/net/bnx2x/bnx2x.h\n@@ -681,13 +681,13 @@ struct bnx2x_slowpath {\n }; /* struct bnx2x_slowpath */\n \n /*\n- * Port specifc data structure.\n+ * Port specific data structure.\n  */\n struct bnx2x_port {\n     /*\n      * Port Management Function (for 57711E only).\n      * When this field is set the driver instance is\n-     * responsible for managing port specifc\n+     * responsible for managing port specific\n      * configurations such as handling link attentions.\n      */\n     uint32_t pmf;\n@@ -732,7 +732,7 @@ struct bnx2x_port {\n \n     /*\n      * MCP scratchpad address for port specific statistics.\n-     * The device is responsible for writing statistcss\n+     * The device is responsible for writing statistics\n      * back to the MCP for use with management firmware such\n      * as UMP/NC-SI.\n      */\n@@ -937,8 +937,8 @@ struct bnx2x_devinfo {\n  * already registered for this port (which means that the user wants storage\n  * services).\n  * 2. During cnic-related load, to know if offload mode is already configured\n- * in the HW or needs to be configrued. Since the transition from nic-mode to\n- * offload-mode in HW causes traffic coruption, nic-mode is configured only\n+ * in the HW or needs to be configured. 
Since the transition from nic-mode to\n+ * offload-mode in HW causes traffic corruption, nic-mode is configured only\n  * in ports on which storage services where never requested.\n  */\n #define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))\n@@ -1709,7 +1709,7 @@ static const uint32_t dmae_reg_go_c[] = {\n \t\t\t GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \\\n \t\t\t GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))\n \n-#define HW_INTERRUT_ASSERT_SET_0 \\\n+#define HW_INTERRUPT_ASSERT_SET_0 \\\n \t\t\t\t(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \\\n@@ -1722,7 +1722,7 @@ static const uint32_t dmae_reg_go_c[] = {\n \t\t\t\t AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)\n-#define HW_INTERRUT_ASSERT_SET_1 \\\n+#define HW_INTERRUPT_ASSERT_SET_1 \\\n \t\t\t\t(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \\\n@@ -1750,7 +1750,7 @@ static const uint32_t dmae_reg_go_c[] = {\n \t\t\t\t AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)\n-#define HW_INTERRUT_ASSERT_SET_2 \\\n+#define HW_INTERRUPT_ASSERT_SET_2 \\\n \t\t\t\t(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \\\n \t\t\t\t AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \\\ndiff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c\nindex 1cd97259..b19f7d67 100644\n--- a/drivers/net/bnx2x/bnx2x_stats.c\n+++ b/drivers/net/bnx2x/bnx2x_stats.c\n@@ -551,7 +551,7 @@ bnx2x_bmac_stats_update(struct bnx2x_softc *sc)\n \tUPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);\n \tUPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);\n \tUPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);\n-\tUPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);\n+\tUPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffsetateentered);\n \tUPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);\n \n \tUPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);\n@@ -586,7 +586,7 @@ bnx2x_bmac_stats_update(struct bnx2x_softc *sc)\n \tUPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);\n \tUPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);\n \tUPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);\n-\tUPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);\n+\tUPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffsetateentered);\n \tUPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);\n \tUPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);\n \tUPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);\n@@ -646,7 +646,7 @@ bnx2x_mstat_stats_update(struct bnx2x_softc *sc)\n     ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);\n     ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);\n     ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);\n-    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);\n+    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffsetateentered);\n     ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);\n     ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);\n     ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);\n@@ -729,7 +729,7 @@ bnx2x_emac_stats_update(struct bnx2x_softc *sc)\n     UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);\n     
UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);\n     UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);\n-    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);\n+    UPDATE_EXTEND_STAT(rx_stat_xoffsetateentered);\n     UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);\n     UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);\n     UPDATE_EXTEND_STAT(tx_stat_outxonsent);\n@@ -1358,7 +1358,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)\n \n     /*\n      * Prepare the first stats ramrod (will be completed with\n-     * the counters equal to zero) - init counters to somethig different.\n+     * the counters equal to zero) - init counters to something different.\n      */\n     memset(&sc->fw_stats_data->storm_counters, 0xff,\n \t   sizeof(struct stats_counter));\ndiff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h\nindex 635412bd..6a6c4ab9 100644\n--- a/drivers/net/bnx2x/bnx2x_stats.h\n+++ b/drivers/net/bnx2x/bnx2x_stats.h\n@@ -105,8 +105,8 @@ struct bnx2x_eth_stats {\n     uint32_t rx_stat_bmac_xpf_lo;\n     uint32_t rx_stat_bmac_xcf_hi;\n     uint32_t rx_stat_bmac_xcf_lo;\n-    uint32_t rx_stat_xoffstateentered_hi;\n-    uint32_t rx_stat_xoffstateentered_lo;\n+    uint32_t rx_stat_xoffsetateentered_hi;\n+    uint32_t rx_stat_xoffsetateentered_lo;\n     uint32_t rx_stat_xonpauseframesreceived_hi;\n     uint32_t rx_stat_xonpauseframesreceived_lo;\n     uint32_t rx_stat_xoffpauseframesreceived_hi;\n@@ -314,7 +314,7 @@ struct bnx2x_eth_stats_old {\n };\n \n struct bnx2x_eth_q_stats_old {\n-    /* Fields to perserve over fw reset*/\n+    /* Fields to preserve over fw reset*/\n     uint32_t total_unicast_bytes_received_hi;\n     uint32_t total_unicast_bytes_received_lo;\n     uint32_t total_broadcast_bytes_received_hi;\n@@ -328,7 +328,7 @@ struct bnx2x_eth_q_stats_old {\n     uint32_t total_multicast_bytes_transmitted_hi;\n     uint32_t total_multicast_bytes_transmitted_lo;\n \n-    /* Fields to perserve last of */\n+    /* Fields to preserve last of */\n     uint32_t total_bytes_received_hi;\n     uint32_t total_bytes_received_lo;\n     uint32_t total_bytes_transmitted_hi;\ndiff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c\nindex 945e3df8..042d4b29 100644\n--- a/drivers/net/bnx2x/bnx2x_vfpf.c\n+++ b/drivers/net/bnx2x/bnx2x_vfpf.c\n@@ -73,7 +73,7 @@ bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,\n \ttl->length = length;\n }\n \n-/* Initiliaze header of the first tlv and clear mailbox*/\n+/* Initialize header of the first tlv and clear mailbox*/\n static void\n bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,\n \t      uint16_t type, uint16_t length)\ndiff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h\nindex 95773412..d71e81c0 100644\n--- a/drivers/net/bnx2x/bnx2x_vfpf.h\n+++ b/drivers/net/bnx2x/bnx2x_vfpf.h\n@@ -241,7 +241,7 @@ struct vf_close_tlv {\n \tuint8_t pad[2];\n };\n \n-/* rlease the VF's acquired resources */\n+/* release the VF's acquired resources */\n struct vf_release_tlv {\n \tstruct vf_first_tlv   first_tlv;\n \tuint16_t\t\tvf_id;  /* for debug */\ndiff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h\nindex 93bca8ad..6fc1fce7 100644\n--- a/drivers/net/bnx2x/ecore_fw_defs.h\n+++ b/drivers/net/bnx2x/ecore_fw_defs.h\n@@ -379,7 +379,7 @@\n /* temporarily used for RTT */\n #define XSEMI_CLK1_RESUL_CHIP (1e-3)\n \n-/* used for Host Coallescing */\n+/* used for Host Coalescing */\n #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))\n 
#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))\n \ndiff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h\nindex 5508c536..7955cc37 100644\n--- a/drivers/net/bnx2x/ecore_hsi.h\n+++ b/drivers/net/bnx2x/ecore_hsi.h\n@@ -961,10 +961,10 @@ struct port_feat_cfg {\t\t    /* port 0: 0x454  port 1: 0x4c8 */\n \t\t#define PORT_FEAT_CFG_DCBX_DISABLED                  0x00000000\n \t\t#define PORT_FEAT_CFG_DCBX_ENABLED                   0x00000100\n \n-    #define PORT_FEAT_CFG_AUTOGREEEN_MASK               0x00000200\n-\t    #define PORT_FEAT_CFG_AUTOGREEEN_SHIFT               9\n-\t    #define PORT_FEAT_CFG_AUTOGREEEN_DISABLED            0x00000000\n-\t    #define PORT_FEAT_CFG_AUTOGREEEN_ENABLED             0x00000200\n+    #define PORT_FEAT_CFG_AUTOGREEN_MASK               0x00000200\n+\t    #define PORT_FEAT_CFG_AUTOGREEN_SHIFT               9\n+\t    #define PORT_FEAT_CFG_AUTOGREEN_DISABLED            0x00000000\n+\t    #define PORT_FEAT_CFG_AUTOGREEN_ENABLED             0x00000200\n \n \t#define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK                0x00000C00\n \t#define PORT_FEAT_CFG_STORAGE_PERSONALITY_SHIFT               10\n@@ -1062,7 +1062,7 @@ struct port_feat_cfg {\t\t    /* port 0: 0x454  port 1: 0x4c8 */\n \t\t#define PORT_FEATURE_MBA_LINK_SPEED_20G              0x20000000\n \n \t/* Secondary MBA configuration,\n-\t * see mba_config for the fileds defination.\n+\t * see mba_config for the fields definition.\n \t */\n \tuint32_t mba_config2;\n \n@@ -1070,12 +1070,12 @@ struct port_feat_cfg {\t\t    /* port 0: 0x454  port 1: 0x4c8 */\n \t#define PORT_FEATURE_MBA_VLAN_TAG_MASK              0x0000FFFF\n \t#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT                      0\n \t#define PORT_FEATURE_MBA_VLAN_EN                    0x00010000\n-\t#define PORT_FEATUTE_BOFM_CFGD_EN                   0x00020000\n+\t#define PORT_FEATURE_BOFM_CFGD_EN                   0x00020000\n \t#define PORT_FEATURE_BOFM_CFGD_FTGT                 0x00040000\n \t#define PORT_FEATURE_BOFM_CFGD_VEN                  0x00080000\n \n \t/* Secondary MBA configuration,\n-\t * see mba_vlan_cfg for the fileds defination.\n+\t * see mba_vlan_cfg for the fields definition.\n \t */\n \tuint32_t mba_vlan_cfg2;\n \n@@ -1429,7 +1429,7 @@ struct extended_dev_info_shared_cfg {             /* NVRAM OFFSET */\n \t#define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA      0x00080000\n \n \t/*  Override Rx signal detect threshold when enabled the threshold\n-\t * will be set staticaly\n+\t * will be set statically\n \t */\n \t#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_MASK     0x00100000\n \t#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_SHIFT    20\n@@ -2189,9 +2189,9 @@ struct eee_remote_vals {\n  * elements on a per byte or word boundary.\n  *\n  * example: an array with 8 entries each 4 bit wide. This array will fit into\n- * a single dword. The diagrmas below show the array order of the nibbles.\n+ * a single dword. 
The diagrams below show the array order of the nibbles.\n  *\n- * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:\n+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:\n  *\n  *                |                |                |               |\n  *   0    |   1   |   2    |   3   |   4    |   5   |   6   |   7   |\n@@ -2519,17 +2519,17 @@ struct shmem_lfa {\n };\n \n /*\n- * Used to suppoert NSCI get OS driver version\n+ * Used to support NSCI get OS driver version\n  * On driver load the version value will be set\n  * On driver unload driver value of 0x0 will be set\n  */\n struct os_drv_ver {\n \t#define DRV_VER_NOT_LOADED                      0\n-\t/*personalites orrder is importent */\n+\t/*personalities order is important */\n \t#define DRV_PERS_ETHERNET                       0\n \t#define DRV_PERS_ISCSI                          1\n \t#define DRV_PERS_FCOE                           2\n-\t/*shmem2 struct is constatnt can't add more personalites here*/\n+\t/*shmem2 struct is constant can't add more personalities here*/\n \t#define MAX_DRV_PERS                            3\n \tuint32_t  versions[MAX_DRV_PERS];\n };\n@@ -2754,8 +2754,8 @@ struct shmem2_region {\n \n \tstruct eee_remote_vals eee_remote_vals[PORT_MAX];\t/* 0x0110 */\n \tuint32_t pf_allocation[E2_FUNC_MAX];\t\t\t/* 0x0120 */\n-\t#define PF_ALLOACTION_MSIX_VECTORS_MASK    0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */\n-\t#define PF_ALLOACTION_MSIX_VECTORS_SHIFT   0\n+\t#define PF_ALLOCATION_MSIX_VECTORS_MASK    0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */\n+\t#define PF_ALLOCATION_MSIX_VECTORS_SHIFT   0\n \n \t/* the status of EEE auto-negotiation\n \t * bits 15:0 the configured tx-lpi entry timer value. 
Depends on bit 31.\n@@ -2821,7 +2821,7 @@ struct shmem2_region {\n \t/* Flag to the driver that PF's drv_info_host_addr buffer was read */\n \tuint32_t mfw_drv_indication;\t\t\t/* Offset 0x19c */\n \n-\t/* We use inidcation for each PF (0..3) */\n+\t/* We use indication for each PF (0..3) */\n \t#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_)  (1 << (_pf_))\n \n \tunion { /* For various OEMs */\t\t\t/* Offset 0x1a0 */\n@@ -2940,7 +2940,7 @@ struct emac_stats {\n \tuint32_t     rx_stat_xonpauseframesreceived;\n \tuint32_t     rx_stat_xoffpauseframesreceived;\n \tuint32_t     rx_stat_maccontrolframesreceived;\n-\tuint32_t     rx_stat_xoffstateentered;\n+\tuint32_t     rx_stat_xoffsetateentered;\n \tuint32_t     rx_stat_dot3statsframestoolong;\n \tuint32_t     rx_stat_etherstatsjabbers;\n \tuint32_t     rx_stat_etherstatsundersizepkts;\n@@ -3378,8 +3378,8 @@ struct mac_stx {\n \tuint32_t     rx_stat_mac_xcf_lo;\n \n \t/* xoff_state_entered */\n-\tuint32_t     rx_stat_xoffstateentered_hi;\n-\tuint32_t     rx_stat_xoffstateentered_lo;\n+\tuint32_t     rx_stat_xoffsetateentered_hi;\n+\tuint32_t     rx_stat_xoffsetateentered_lo;\n \t/* pause_xon_frames_received */\n \tuint32_t     rx_stat_xonpauseframesreceived_hi;\n \tuint32_t     rx_stat_xonpauseframesreceived_lo;\n@@ -6090,8 +6090,8 @@ struct fw_version {\n \tuint32_t flags;\n #define FW_VERSION_OPTIMIZED (0x1 << 0)\n #define FW_VERSION_OPTIMIZED_SHIFT 0\n-#define FW_VERSION_BIG_ENDIEN (0x1 << 1)\n-#define FW_VERSION_BIG_ENDIEN_SHIFT 1\n+#define FW_VERSION_BIG_ENDIAN (0x1 << 1)\n+#define FW_VERSION_BIG_ENDIAN_SHIFT 1\n #define FW_VERSION_CHIP_VERSION (0x3 << 2)\n #define FW_VERSION_CHIP_VERSION_SHIFT 2\n #define __FW_VERSION_RESERVED (0xFFFFFFF << 4)\n@@ -6195,7 +6195,7 @@ struct hc_sb_data {\n \n \n /*\n- * Segment types for host coaslescing\n+ * Segment types for host coalescing\n  */\n enum hc_segment {\n \tHC_REGULAR_SEGMENT,\n@@ -6242,7 +6242,7 @@ struct hc_status_block_data_e2 {\n \n \n /*\n- * IGU block operartion modes (in Everest2)\n+ * IGU block operation modes (in Everest2)\n  */\n enum igu_mode {\n \tHC_IGU_BC_MODE,\n@@ -6407,8 +6407,8 @@ struct pram_fw_version {\n #define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0\n #define PRAM_FW_VERSION_STORM_ID (0x3 << 1)\n #define PRAM_FW_VERSION_STORM_ID_SHIFT 1\n-#define PRAM_FW_VERSION_BIG_ENDIEN (0x1 << 3)\n-#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3\n+#define PRAM_FW_VERSION_BIG_ENDIAN (0x1 << 3)\n+#define PRAM_FW_VERSION_BIG_ENDIAN_SHIFT 3\n #define PRAM_FW_VERSION_CHIP_VERSION (0x3 << 4)\n #define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4\n #define __PRAM_FW_VERSION_RESERVED0 (0x3 << 6)\n@@ -6508,7 +6508,7 @@ struct stats_query_header {\n \n \n /*\n- * Types of statistcis query entry\n+ * Types of statistics query entry\n  */\n enum stats_query_type {\n \tSTATS_TYPE_QUEUE,\n@@ -6542,7 +6542,7 @@ enum storm_id {\n \n \n /*\n- * Taffic types used in ETS and flow control algorithms\n+ * Traffic types used in ETS and flow control algorithms\n  */\n enum traffic_type {\n \tLLFC_TRAFFIC_TYPE_NW,\ndiff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h\nindex 4e348612..a339c0bf 100644\n--- a/drivers/net/bnx2x/ecore_init.h\n+++ b/drivers/net/bnx2x/ecore_init.h\n@@ -288,7 +288,7 @@ static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mod\n  *\n  * IMPORTANT REMARKS:\n  * 1. the cmng_init struct does not represent the contiguous internal ram\n- *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET\n+ *    structure. 
the driver should use the XSTORM_CMNG_PER_PORT_VARS_OFFSET\n  *    offset in order to write the port sub struct and the\n  *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other\n  *    words - don't use memcpy!).\ndiff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h\nindex 0945e799..4ed811fd 100644\n--- a/drivers/net/bnx2x/ecore_init_ops.h\n+++ b/drivers/net/bnx2x/ecore_init_ops.h\n@@ -534,7 +534,7 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,\n \t\tREG_WR(sc, PXP2_REG_WR_CDU_MPS, val);\n \t}\n \n-\t/* Validate number of tags suppoted by device */\n+\t/* Validate number of tags supported by device */\n #define PCIE_REG_PCIER_TL_HDR_FC_ST\t\t0x2980\n \tval = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);\n \tval &= 0xFF;\n@@ -714,7 +714,7 @@ static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,\n \tfor (i = ilt_cli->start; i <= ilt_cli->end; i++)\n \t\tecore_ilt_line_init_op(sc, ilt, i, initop);\n \n-\t/* init/clear the ILT boundries */\n+\t/* init/clear the ILT boundaries */\n \tecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);\n }\n \n@@ -765,7 +765,7 @@ static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,\n \n /*\n  * called during init common stage, ilt clients should be initialized\n- * prioir to calling this function\n+ * prior to calling this function\n  */\n static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)\n {\ndiff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h\nindex bb92d131..6b220bc5 100644\n--- a/drivers/net/bnx2x/ecore_reg.h\n+++ b/drivers/net/bnx2x/ecore_reg.h\n@@ -19,7 +19,7 @@\n #define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\t\t (0x1 << 3)\n #define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\t\t\t (0x1 << 4)\n #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\t\t (0x1 << 1)\n-/* [R 1] ATC initalization done */\n+/* [R 1] ATC initialization done */\n #define ATC_REG_ATC_INIT_DONE\t\t\t\t\t 0x1100bc\n /* [RW 6] Interrupt mask register #0 read/write */\n #define ATC_REG_ATC_INT_MASK\t\t\t\t\t 0x1101c8\n@@ -56,7 +56,7 @@\n #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0\t\t\t\t 0x60078\n /* [RW 10] Write client 0: Assert pause threshold. Not Functional */\n #define BRB1_REG_PAUSE_LOW_THRESHOLD_0\t\t\t\t 0x60068\n-/* [R 24] The number of full blocks occpied by port. */\n+/* [R 24] The number of full blocks occupied by port. */\n #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0\t\t\t\t 0x60094\n /* [R 5] Used to read the value of the XX protection CAM occupancy counter. */\n #define CCM_REG_CAM_OCCUP\t\t\t\t\t 0xd0188\n@@ -456,7 +456,7 @@\n #define IGU_REG_PCI_PF_MSIX_FUNC_MASK\t\t\t\t 0x130148\n #define IGU_REG_PCI_PF_MSI_EN\t\t\t\t\t 0x130140\n /* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no\n- * pending; 1 = pending. Pendings means interrupt was asserted; and write\n+ * pending; 1 = pending. Pending means interrupt was asserted; and write\n  * done was not received. Data valid only in addresses 0-4. all the rest are\n  * zero.\n  */\n@@ -1059,14 +1059,14 @@\n /* [R 28] this field hold the last information that caused reserved\n  * attention. 
bits [19:0] - address; [22:20] function; [23] reserved;\n  * [27:24] the master that caused the attention - according to the following\n- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =\n+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =\n  * dbu; 8 = dmae\n  */\n #define MISC_REG_GRC_RSV_ATTN\t\t\t\t\t 0xa3c0\n /* [R 28] this field hold the last information that caused timeout\n  * attention. bits [19:0] - address; [22:20] function; [23] reserved;\n  * [27:24] the master that caused the attention - according to the following\n- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =\n+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =\n  * dbu; 8 = dmae\n  */\n #define MISC_REG_GRC_TIMEOUT_ATTN\t\t\t\t 0xa3c4\n@@ -1398,11 +1398,11 @@\n  * ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0\n  */\n #define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0\t\t\t 0x102f8\n-/* [RW 1] Port0: If set along with the led_control_override_trafic_p0 bit;\n+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;\n  * turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also\n  * set; the LED will blink with blink rate specified in\n  * ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and\n- * ~nig_regsters_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0\n+ * ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0\n  * fields.\n  */\n #define NIG_REG_LED_CONTROL_TRAFFIC_P0\t\t\t\t 0x10300\n@@ -1567,7 +1567,7 @@\n  * MAC DA 2. The reset default is set to mask out all parameters.\n  */\n #define NIG_REG_P0_LLH_PTP_PARAM_MASK\t\t\t\t 0x187a0\n-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set\n+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set\n  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .\n  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP\n  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;\n@@ -1672,7 +1672,7 @@\n  * MAC DA 2. The reset default is set to mask out all parameters.\n  */\n #define NIG_REG_P0_TLLH_PTP_PARAM_MASK\t\t\t\t 0x187f0\n-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set\n+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set\n  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .\n  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP\n  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;\n@@ -1839,7 +1839,7 @@\n  * MAC DA 2. The reset default is set to mask out all parameters.\n  */\n #define NIG_REG_P1_LLH_PTP_PARAM_MASK\t\t\t\t 0x187c8\n-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set\n+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set\n  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .\n  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP\n  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;\n@@ -1926,7 +1926,7 @@\n  * MAC DA 2. The reset default is set to mask out all parameters.\n  */\n #define NIG_REG_P1_TLLH_PTP_PARAM_MASK\t\t\t\t 0x187f8\n-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set\n+/* [RW 14] Mask register for the rules used in detecting PTP packets. 
Set\n  * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .\n  * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP\n  * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;\n@@ -2306,7 +2306,7 @@\n #define PBF_REG_HDRS_AFTER_BASIC\t\t\t\t 0x15c0a8\n /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */\n #define PBF_REG_HDRS_AFTER_TAG_0\t\t\t\t 0x15c0b8\n-/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest\n+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest\n  * priority in the command arbiter.\n  */\n #define PBF_REG_HIGH_PRIORITY_COS_NUM\t\t\t\t 0x15c04c\n@@ -2366,7 +2366,7 @@\n  */\n #define PBF_REG_NUM_STRICT_ARB_SLOTS\t\t\t\t 0x15c064\n /* [R 11] Removed for E3 B0 - Port 0 threshold used by arbiter in 16 byte\n- * lines used when pause not suppoterd.\n+ * lines used when pause not supported.\n  */\n #define PBF_REG_P0_ARB_THRSH\t\t\t\t\t 0x1400e4\n /* [R 11] Removed for E3 B0 - Current credit for port 0 in the tx port\n@@ -3503,7 +3503,7 @@\n  * queues.\n  */\n #define QM_REG_OVFERROR\t\t\t\t\t\t 0x16805c\n-/* [RC 6] the Q were the qverflow occurs */\n+/* [RC 6] the Q were the overflow occurs */\n #define QM_REG_OVFQNUM\t\t\t\t\t\t 0x168058\n /* [R 16] Pause state for physical queues 15-0 */\n #define QM_REG_PAUSESTATE0\t\t\t\t\t 0x168410\n@@ -4570,8 +4570,8 @@\n #define PCICFG_COMMAND_RESERVED\t\t\t(0x1f<<11)\n #define PCICFG_STATUS_OFFSET\t\t\t\t0x06\n #define PCICFG_REVISION_ID_OFFSET\t\t\t0x08\n-#define PCICFG_REVESION_ID_MASK\t\t\t0xff\n-#define PCICFG_REVESION_ID_ERROR_VAL\t\t0xff\n+#define PCICFG_REVISION_ID_MASK\t\t\t0xff\n+#define PCICFG_REVISION_ID_ERROR_VAL\t\t0xff\n #define PCICFG_CACHE_LINE_SIZE\t\t\t\t0x0c\n #define PCICFG_LATENCY_TIMER\t\t\t\t0x0d\n #define PCICFG_HEADER_TYPE\t\t\t\t0x0e\n@@ -4890,7 +4890,7 @@\n \tif set, generate pcie_err_attn output when this error is seen. WC \\\n \t*/\n #define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \\\n-\t(1 << 3) /* Receive UR Statusfor Function 2. If set, generate \\\n+\t(1 << 3) /* Receive UR Status for Function 2. If set, generate \\\n \tpcie_err_attn output when this error is seen. WC */\n #define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \\\n \t(1 << 2) /* Completer Timeout Status Status for Function 2, if \\\n@@ -4986,7 +4986,7 @@\n \tif set, generate pcie_err_attn output when this error is seen. WC \\\n \t*/\n #define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \\\n-\t(1 << 3) /* Receive UR Statusfor Function 5. If set, generate \\\n+\t(1 << 3) /* Receive UR Status for Function 5. If set, generate \\\n \tpcie_err_attn output when this error is seen. 
WC */\n #define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \\\n \t(1 << 2) /* Completer Timeout Status Status for Function 5, if \\\n@@ -5272,8 +5272,8 @@\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS\t\t0x0008\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE\t0x0010\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE\t0x0020\n-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE\t0x0040\n-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE\t0x0080\n+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_TXSIDE\t0x0040\n+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_RXSIDE\t0x0080\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK\t\t0x3f00\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M\t\t0x0000\n #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M\t\t0x0100\ndiff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c\nindex 0075422e..6c727f2f 100644\n--- a/drivers/net/bnx2x/ecore_sp.c\n+++ b/drivers/net/bnx2x/ecore_sp.c\n@@ -1338,7 +1338,7 @@ static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,\n \tif (rc != ECORE_SUCCESS) {\n \t\t__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);\n \n-\t\t/** Calling function should not diffrentiate between this case\n+\t\t/** Calling function should not differentiate between this case\n \t\t *  and the case in which there is already a pending ramrod\n \t\t */\n \t\trc = ECORE_PENDING;\n@@ -2246,7 +2246,7 @@ struct ecore_pending_mcast_cmd {\n \tunion {\n \t\tecore_list_t macs_head;\n \t\tuint32_t macs_num;\t/* Needed for DEL command */\n-\t\tint next_bin;\t/* Needed for RESTORE flow with aprox match */\n+\t\tint next_bin;\t/* Needed for RESTORE flow with approx match */\n \t} data;\n \n \tint done;\t\t/* set to TRUE, when the command has been handled,\n@@ -2352,11 +2352,11 @@ static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)\n \tint i, j, inner_start = last % BIT_VEC64_ELEM_SZ;\n \n \tfor (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {\n-\t\tif (o->registry.aprox_match.vec[i])\n+\t\tif (o->registry.approx_match.vec[i])\n \t\t\tfor (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {\n \t\t\t\tint cur_bit = j + BIT_VEC64_ELEM_SZ * i;\n \t\t\t\tif (BIT_VEC64_TEST_BIT\n-\t\t\t\t    (o->registry.aprox_match.vec, cur_bit)) {\n+\t\t\t\t    (o->registry.approx_match.vec, cur_bit)) {\n \t\t\t\t\treturn cur_bit;\n \t\t\t\t}\n \t\t\t}\n@@ -2379,7 +2379,7 @@ static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)\n \tint cur_bit = ecore_mcast_get_next_bin(o, 0);\n \n \tif (cur_bit >= 0)\n-\t\tBIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);\n+\t\tBIT_VEC64_CLEAR_BIT(o->registry.approx_match.vec, cur_bit);\n \n \treturn cur_bit;\n }\n@@ -2421,7 +2421,7 @@ static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,\n \tswitch (cmd) {\n \tcase ECORE_MCAST_CMD_ADD:\n \t\tbin = ecore_mcast_bin_from_mac(cfg_data->mac);\n-\t\tBIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);\n+\t\tBIT_VEC64_SET_BIT(o->registry.approx_match.vec, bin);\n \t\tbreak;\n \n \tcase ECORE_MCAST_CMD_DEL:\n@@ -2812,7 +2812,7 @@ static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)\n \tuint64_t elem;\n \n \tfor (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {\n-\t\telem = o->registry.aprox_match.vec[i];\n+\t\telem = o->registry.approx_match.vec[i];\n \t\tfor (; elem; cnt++)\n \t\t\telem &= elem - 1;\n \t}\n@@ -2950,7 +2950,7 @@ static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,\n \t\t     bit);\n \n \t\t/* 
bookkeeping... */\n-\t\tBIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);\n+\t\tBIT_VEC64_SET_BIT(o->registry.approx_match.vec, bit);\n \t}\n }\n \n@@ -2998,8 +2998,8 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,\n \t\t\tECORE_MSG(sc, \"Invalidating multicast MACs configuration\");\n \n \t\t\t/* clear the registry */\n-\t\t\tECORE_MEMSET(o->registry.aprox_match.vec, 0,\n-\t\t\t\t     sizeof(o->registry.aprox_match.vec));\n+\t\t\tECORE_MEMSET(o->registry.approx_match.vec, 0,\n+\t\t\t\t     sizeof(o->registry.approx_match.vec));\n \t\t\tbreak;\n \n \t\tcase ECORE_MCAST_CMD_RESTORE:\n@@ -3016,8 +3016,8 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,\n \t\t\tREG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);\n \t} else\n \t\t/* clear the registry */\n-\t\tECORE_MEMSET(o->registry.aprox_match.vec, 0,\n-\t\t\t     sizeof(o->registry.aprox_match.vec));\n+\t\tECORE_MEMSET(o->registry.approx_match.vec, 0,\n+\t\t\t     sizeof(o->registry.approx_match.vec));\n \n \t/* We are done */\n \tr->clear_pending(r);\n@@ -3025,15 +3025,15 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,\n \treturn ECORE_SUCCESS;\n }\n \n-static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)\n+static int ecore_mcast_get_registry_size_approx(struct ecore_mcast_obj *o)\n {\n-\treturn o->registry.aprox_match.num_bins_set;\n+\treturn o->registry.approx_match.num_bins_set;\n }\n \n-static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,\n+static void ecore_mcast_set_registry_size_approx(struct ecore_mcast_obj *o,\n \t\t\t\t\t\tint n)\n {\n-\to->registry.aprox_match.num_bins_set = n;\n+\to->registry.approx_match.num_bins_set = n;\n }\n \n int ecore_config_mcast(struct bnx2x_softc *sc,\n@@ -3163,9 +3163,9 @@ void ecore_init_mcast_obj(struct bnx2x_softc *sc,\n \t\tmcast_obj->validate = ecore_mcast_validate_e1h;\n \t\tmcast_obj->revert = ecore_mcast_revert_e1h;\n \t\tmcast_obj->get_registry_size =\n-\t\t    ecore_mcast_get_registry_size_aprox;\n+\t\t    ecore_mcast_get_registry_size_approx;\n \t\tmcast_obj->set_registry_size =\n-\t\t    ecore_mcast_set_registry_size_aprox;\n+\t\t    ecore_mcast_set_registry_size_approx;\n \t} else {\n \t\tmcast_obj->config_mcast = ecore_mcast_setup_e2;\n \t\tmcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;\n@@ -3177,9 +3177,9 @@ void ecore_init_mcast_obj(struct bnx2x_softc *sc,\n \t\tmcast_obj->validate = ecore_mcast_validate_e2;\n \t\tmcast_obj->revert = ecore_mcast_revert_e2;\n \t\tmcast_obj->get_registry_size =\n-\t\t    ecore_mcast_get_registry_size_aprox;\n+\t\t    ecore_mcast_get_registry_size_approx;\n \t\tmcast_obj->set_registry_size =\n-\t\t    ecore_mcast_set_registry_size_aprox;\n+\t\t    ecore_mcast_set_registry_size_approx;\n \t}\n }\n \n@@ -3424,7 +3424,7 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,\n \t} else {\n \n \t\t/*\n-\t\t * CAM credit is equaly divided between all active functions\n+\t\t * CAM credit is equally divided between all active functions\n \t\t * on the PATH.\n \t\t */\n \t\tif (func_num > 0) {\ndiff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h\nindex d58072da..a5276475 100644\n--- a/drivers/net/bnx2x/ecore_sp.h\n+++ b/drivers/net/bnx2x/ecore_sp.h\n@@ -430,7 +430,7 @@ enum {\n \tRAMROD_RESTORE,\n \t /* Execute the next command now */\n \tRAMROD_EXEC,\n-\t/* Don't add a new command and continue execution of posponed\n+\t/* Don't add a new command and continue execution of postponed\n \t * commands. 
If not set a new command will be added to the\n \t * pending commands list.\n \t */\n@@ -974,7 +974,7 @@ struct ecore_mcast_obj {\n \t\t\t *  properly create DEL commands.\n \t\t\t */\n \t\t\tint num_bins_set;\n-\t\t} aprox_match;\n+\t\t} approx_match;\n \n \t\tstruct {\n \t\t\tecore_list_t macs;\n@@ -1173,7 +1173,7 @@ struct ecore_rss_config_obj {\n \t/* Last configured indirection table */\n \tuint8_t\t\t\tind_table[T_ETH_INDIRECTION_TABLE_SIZE];\n \n-\t/* flags for enabling 4-tupple hash on UDP */\n+\t/* flags for enabling 4-tuple hash on UDP */\n \tuint8_t\t\t\tudp_rss_v4;\n \tuint8_t\t\t\tudp_rss_v6;\n \n@@ -1285,7 +1285,7 @@ enum ecore_q_type {\n #define ECORE_MULTI_TX_COS_E3B0\t\t\t3\n #define ECORE_MULTI_TX_COS\t\t\t3 /* Maximum possible */\n #define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)\n-/* DMAE channel to be used by FW for timesync workaroun. A driver that sends\n+/* DMAE channel to be used by FW for timesync workaround. A driver that sends\n  * timesync-related ramrods must not use this DMAE command ID.\n  */\n #define FW_DMAE_CMD_ID 6\ndiff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c\nindex 2093d8f3..838ad351 100644\n--- a/drivers/net/bnx2x/elink.c\n+++ b/drivers/net/bnx2x/elink.c\n@@ -147,8 +147,8 @@\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS\t\t0x0008\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE\t0x0010\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE\t0x0020\n-\t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE\t0x0040\n-\t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE\t0x0080\n+\t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_TXSIDE\t0x0040\n+\t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_RXSIDE\t0x0080\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK\t\t0x3f00\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M\t\t0x0000\n \t#define\tMDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M\t\t0x0100\n@@ -746,7 +746,7 @@ typedef elink_status_t (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy,\n /********************************************************/\n #define ELINK_ETH_HLEN\t\t\t14\n /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */\n-#define ELINK_ETH_OVREHEAD\t\t\t(ELINK_ETH_HLEN + 8 + 8)\n+#define ELINK_ETH_OVERHEAD\t\t\t(ELINK_ETH_HLEN + 8 + 8)\n #define ELINK_ETH_MIN_PACKET_SIZE\t\t60\n #define ELINK_ETH_MAX_PACKET_SIZE\t\t1500\n #define ELINK_ETH_MAX_JUMBO_PACKET_SIZE\t9600\n@@ -814,10 +814,10 @@ typedef elink_status_t (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy,\n \t\t\t\tSHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT\n #define ELINK_AUTONEG_REMOTE_PHY\tSHARED_HW_CFG_AN_ENABLE_REMOTE_PHY\n \n-#define ELINK_GP_STATUS_PAUSE_RSOLUTION_TXSIDE \\\n-\t\t\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE\n-#define ELINK_GP_STATUS_PAUSE_RSOLUTION_RXSIDE \\\n-\t\t\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE\n+#define ELINK_GP_STATUS_PAUSE_RESOLUTION_TXSIDE \\\n+\t\t\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_TXSIDE\n+#define ELINK_GP_STATUS_PAUSE_RESOLUTION_RXSIDE \\\n+\t\t\tMDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RESOLUTION_RXSIDE\n #define ELINK_GP_STATUS_SPEED_MASK \\\n \t\t\tMDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK\n #define ELINK_GP_STATUS_10M\tMDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M\n@@ -1460,7 +1460,7 @@ static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)\n }\n /******************************************************************************\n  * 
Description:\n- *\tE3B0 disable will return basicly the values to init values.\n+ *\tE3B0 disable will return basically the values to init values.\n  *.\n  ******************************************************************************/\n static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,\n@@ -1483,7 +1483,7 @@ static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,\n \n /******************************************************************************\n  * Description:\n- *\tDisable will return basicly the values to init values.\n+ *\tDisable will return basically the values to init values.\n  *\n  ******************************************************************************/\n elink_status_t elink_ets_disabled(struct elink_params *params,\n@@ -1506,7 +1506,7 @@ elink_status_t elink_ets_disabled(struct elink_params *params,\n \n /******************************************************************************\n  * Description\n- *\tSet the COS mappimg to SP and BW until this point all the COS are not\n+ *\tSet the COS mapping to SP and BW until this point all the COS are not\n  *\tset as SP or BW.\n  ******************************************************************************/\n static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,\n@@ -1652,7 +1652,7 @@ static elink_status_t elink_ets_e3b0_get_total_bw(\n \t\t}\n \t\tELINK_DEBUG_P0(sc,\n \t\t   \"elink_ets_E3B0_config total BW should be 100\");\n-\t\t/* We can handle a case whre the BW isn't 100 this can happen\n+\t\t/* We can handle a case where the BW isn't 100 this can happen\n \t\t * if the TC are joined.\n \t\t */\n \t}\n@@ -2608,7 +2608,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,\n \tREG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);\n \n #ifdef ELINK_INCLUDE_EMUL\n-\t/* for paladium */\n+\t/* for palladium */\n \tif (CHIP_REV_IS_EMUL(sc)) {\n \t\t/* Use lane 1 (of lanes 0-3) */\n \t\tREG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);\n@@ -2726,7 +2726,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,\n \t/* Enable emac for jumbo packets */\n \telink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE,\n \t\t(EMAC_RX_MTU_SIZE_JUMBO_ENA |\n-\t\t (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD)));\n+\t\t (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD)));\n \n \t/* Strip CRC */\n \tREG_WR(sc, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port * 4, 0x1);\n@@ -2850,7 +2850,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,\n \n \t/* Set Time (based unit is 512 bit time) between automatic\n \t * re-sending of PP packets amd enable automatic re-send of\n-\t * Per-Priroity Packet as long as pp_gen is asserted and\n+\t * Per-Priority Packet as long as pp_gen is asserted and\n \t * pp_disable is low.\n \t */\n \tval = 0x8000;\n@@ -3124,19 +3124,19 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);\n \n \t/* Set rx mtu */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD;\n \twb_data[1] = 0;\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);\n \n \telink_update_pfc_bmac1(params, vars);\n \n \t/* Set tx mtu */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD;\n \twb_data[1] = 0;\n 
\tREG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);\n \n \t/* Set cnt max size */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD;\n \twb_data[1] = 0;\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);\n \n@@ -3203,18 +3203,18 @@ static elink_status_t elink_bmac2_enable(struct elink_params *params,\n \tDELAY(30);\n \n \t/* Set RX MTU */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD;\n \twb_data[1] = 0;\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);\n \tDELAY(30);\n \n \t/* Set TX MTU */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD;\n \twb_data[1] = 0;\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);\n \tDELAY(30);\n \t/* Set cnt max size */\n-\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD - 2;\n+\twb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVERHEAD - 2;\n \twb_data[1] = 0;\n \tREG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);\n \tDELAY(30);\n@@ -3339,7 +3339,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,\n \n \t} else {\n \t\tuint32_t thresh = (ELINK_ETH_MAX_JUMBO_PACKET_SIZE +\n-\t\t\t      ELINK_ETH_OVREHEAD) / 16;\n+\t\t\t      ELINK_ETH_OVERHEAD) / 16;\n \t\tREG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);\n \t\t/* Update threshold */\n \t\tREG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, thresh);\n@@ -3369,7 +3369,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,\n }\n \n /**\n- * elink_get_emac_base - retrive emac base address\n+ * elink_get_emac_base - retrieve emac base address\n  *\n  * @bp:\t\t\tdriver handle\n  * @mdc_mdio_access:\taccess type\n@@ -4518,7 +4518,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,\n \t\telink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,\n \t\t\t\t reg_set[i].val);\n \n-\t/* Start KR2 work-around timer which handles BNX2X8073 link-parner */\n+\t/* Start KR2 work-around timer which handles BNX2X8073 link-partner */\n \tparams->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;\n \telink_update_link_attr(params, params->link_attr_sync);\n }\n@@ -7824,7 +7824,7 @@ elink_status_t elink_link_update(struct elink_params *params,\n \t\t\t * hence its link is expected to be down\n \t\t\t * - SECOND_PHY means that first phy should not be able\n \t\t\t * to link up by itself (using configuration)\n-\t\t\t * - DEFAULT should be overridden during initialiazation\n+\t\t\t * - DEFAULT should be overridden during initialization\n \t\t\t */\n \t\t\t\tELINK_DEBUG_P1(sc, \"Invalid link indication\"\n \t\t\t\t\t       \" mpc=0x%x. 
DISABLING LINK !!!\",\n@@ -10991,7 +10991,7 @@ static elink_status_t elink_84858_cmd_hdlr(struct elink_phy *phy,\n \t\tELINK_DEBUG_P0(sc, \"FW cmd failed.\");\n \t\treturn ELINK_STATUS_ERROR;\n \t}\n-\t/* Step5: Once the command has completed, read the specficied DATA\n+\t/* Step5: Once the command has completed, read the specified DATA\n \t * registers for any saved results for the command, if applicable\n \t */\n \n@@ -12102,7 +12102,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,\n \t\tif (phy->flags & ELINK_FLAGS_EEE) {\n \t\t\t/* Handle legacy auto-grEEEn */\n \t\t\tif (params->feature_config_flags &\n-\t\t\t    ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) {\n+\t\t\t    ELINK_FEATURE_CONFIG_AUTOGREEN_ENABLED) {\n \t\t\t\ttemp = 6;\n \t\t\t\tELINK_DEBUG_P0(sc, \"Enabling Auto-GrEEEn\");\n \t\t\t} else {\ndiff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h\nindex 6b2e85f1..1dd9b799 100644\n--- a/drivers/net/bnx2x/elink.h\n+++ b/drivers/net/bnx2x/elink.h\n@@ -403,7 +403,7 @@ struct elink_params {\n #define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC\t\t\t(1 << 6)\n #define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC\t\t\t(1 << 7)\n #define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX\t\t\t(1 << 8)\n-#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED\t\t(1 << 9)\n+#define ELINK_FEATURE_CONFIG_AUTOGREEN_ENABLED\t\t(1 << 9)\n #define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED\t(1 << 10)\n #define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET\t\t(1 << 11)\n #define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST\t\t\t(1 << 12)\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex f53f8632..7bcf36c9 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -3727,7 +3727,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)\n \tint rc;\n \n \tif (!BNXT_PF(bp)) {\n-\t\tPMD_DRV_LOG(ERR, \"Attempt to allcoate VFs on a VF!\\n\");\n+\t\tPMD_DRV_LOG(ERR, \"Attempt to allocate VFs on a VF!\\n\");\n \t\treturn -EINVAL;\n \t}\n \ndiff --git a/drivers/net/bnxt/tf_core/tfp.c b/drivers/net/bnxt/tf_core/tfp.c\nindex a4b09346..a967a9cc 100644\n--- a/drivers/net/bnxt/tf_core/tfp.c\n+++ b/drivers/net/bnxt/tf_core/tfp.c\n@@ -52,7 +52,7 @@ tfp_send_msg_direct(struct bnxt *bp,\n }\n \n /**\n- * Allocates zero'ed memory from the heap.\n+ * Allocates zeroed memory from the heap.\n  *\n  * Returns success or failure code.\n  */\ndiff --git a/drivers/net/bnxt/tf_core/tfp.h b/drivers/net/bnxt/tf_core/tfp.h\nindex dd0a3470..5a99c7a0 100644\n--- a/drivers/net/bnxt/tf_core/tfp.h\n+++ b/drivers/net/bnxt/tf_core/tfp.h\n@@ -150,7 +150,7 @@ tfp_msg_hwrm_oem_cmd(struct tf *tfp,\n \t\t     uint32_t max_flows);\n \n /**\n- * Allocates zero'ed memory from the heap.\n+ * Allocates zeroed memory from the heap.\n  *\n  * NOTE: Also performs virt2phy address conversion by default thus is\n  * can be expensive to invoke.\ndiff --git a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c\nindex b09ccced..27f42d8c 100644\n--- a/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c\n+++ b/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c\n@@ -102,9 +102,9 @@ int32_t bnxt_rss_config_action_apply(struct bnxt_ulp_mapper_parms *parms)\n \n #define ULP_FILE_PATH_SIZE 256\n \n-static int32_t glob_error_fn(const char *epath, int32_t eerrno)\n+static int32_t glob_error_fn(const char *epath, int32_t err_no)\n {\n-\tBNXT_TF_DBG(ERR, \"path %s error %d\\n\", epath, eerrno);\n+\tBNXT_TF_DBG(ERR, \"path %s error %d\\n\", epath, err_no);\n \treturn 0;\n }\n \ndiff --git 
a/drivers/net/bonding/eth_bond_8023ad_private.h b/drivers/net/bonding/eth_bond_8023ad_private.h\nindex 9b5738af..a5e1fffe 100644\n--- a/drivers/net/bonding/eth_bond_8023ad_private.h\n+++ b/drivers/net/bonding/eth_bond_8023ad_private.h\n@@ -20,7 +20,7 @@\n /** Maximum number of LACP packets from one slave queued in TX ring. */\n #define BOND_MODE_8023AX_SLAVE_TX_PKTS        1\n /**\n- * Timeouts deffinitions (5.4.4 in 802.1AX documentation).\n+ * Timeout definitions (5.4.4 in 802.1AX documentation).\n  */\n #define BOND_8023AD_FAST_PERIODIC_MS                900\n #define BOND_8023AD_SLOW_PERIODIC_MS              29000\ndiff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h\nindex 8b104b63..9626b26d 100644\n--- a/drivers/net/bonding/eth_bond_private.h\n+++ b/drivers/net/bonding/eth_bond_private.h\n@@ -139,7 +139,7 @@ struct bond_dev_private {\n \n \tuint16_t slave_count;\t\t\t/**< Number of bonded slaves */\n \tstruct bond_slave_details slaves[RTE_MAX_ETHPORTS];\n-\t/**< Arary of bonded slaves details */\n+\t/**< Array of bonded slaves details */\n \n \tstruct mode8023ad_private mode4;\n \tuint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];\ndiff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c\nindex ca50583d..b3cddd8a 100644\n--- a/drivers/net/bonding/rte_eth_bond_8023ad.c\n+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c\n@@ -243,7 +243,7 @@ record_default(struct port *port)\n {\n \t/* Record default parameters for partner. Partner admin parameters\n \t * are not implemented so set them to arbitrary default (last known) and\n-\t * mark actor that parner is in defaulted state. */\n+\t * mark actor that partner is in defaulted state. */\n \tport->partner_state = STATE_LACP_ACTIVE;\n \tACTOR_STATE_SET(port, DEFAULTED);\n }\n@@ -300,7 +300,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,\n \t\tMODE4_DEBUG(\"LACP -> CURRENT\\n\");\n \t\tBOND_PRINT_LACP(lacp);\n \t\t/* Update selected flag. If partner parameters are defaulted assume they\n-\t\t * are match. If not defaulted  compare LACP actor with ports parner\n+\t\t * match. If not defaulted, compare LACP actor with ports partner\n \t\t * params. */\n \t\tif (!ACTOR_STATE(port, DEFAULTED) &&\n \t\t\t(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)\n@@ -399,16 +399,16 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)\n \t\tPARTNER_STATE(port, LACP_ACTIVE);\n \n \tuint8_t is_partner_fast, was_partner_fast;\n-\t/* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */\n+\t/* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */\n \tif (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {\n \t\ttimer_cancel(&port->periodic_timer);\n \t\ttimer_force_expired(&port->tx_machine_timer);\n \t\tSM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);\n \n \t\tMODE4_DEBUG(\"-> NO_PERIODIC ( %s%s%s)\\n\",\n-\t\t\tSM_FLAG(port, BEGIN) ? \"begind \" : \"\",\n+\t\t\tSM_FLAG(port, BEGIN) ? \"begin \" : \"\",\n \t\t\tSM_FLAG(port, LACP_ENABLED) ? \"\" : \"LACP disabled \",\n-\t\t\tactive ? \"LACP active \" : \"LACP pasive \");\n+\t\t\tactive ? 
\"LACP active \" : \"LACP passive \");\n \t\treturn;\n \t}\n \n@@ -495,10 +495,10 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)\n \tif ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&\n \t\t!PARTNER_STATE(port, SYNCHRONIZATION)) {\n \t\t/* If in COLLECTING or DISTRIBUTING state and partner becomes out of\n-\t\t * sync transit to ATACHED state.  */\n+\t\t * sync transit to ATTACHED state.  */\n \t\tACTOR_STATE_CLR(port, DISTRIBUTING);\n \t\tACTOR_STATE_CLR(port, COLLECTING);\n-\t\t/* Clear actor sync to activate transit ATACHED in condition bellow */\n+\t\t/* Clear actor sync to activate transit ATTACHED in condition below */\n \t\tACTOR_STATE_CLR(port, SYNCHRONIZATION);\n \t\tMODE4_DEBUG(\"Out of sync -> ATTACHED\\n\");\n \t}\n@@ -696,7 +696,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id)\n \t/* Search for aggregator suitable for this port */\n \tfor (i = 0; i < slaves_count; ++i) {\n \t\tagg = &bond_mode_8023ad_ports[slaves[i]];\n-\t\t/* Skip ports that are not aggreagators */\n+\t\t/* Skip ports that are not aggregators */\n \t\tif (agg->aggregator_port_id != slaves[i])\n \t\t\tcontinue;\n \n@@ -921,7 +921,7 @@ bond_mode_8023ad_periodic_cb(void *arg)\n \n \t\t\tSM_FLAG_SET(port, BEGIN);\n \n-\t\t\t/* LACP is disabled on half duples or link is down */\n+\t\t\t/* LACP is disabled on half duplex or link is down */\n \t\t\tif (SM_FLAG(port, LACP_ENABLED)) {\n \t\t\t\t/* If port was enabled set it to BEGIN state */\n \t\t\t\tSM_FLAG_CLR(port, LACP_ENABLED);\n@@ -1069,7 +1069,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,\n \tport->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;\n \tport->sm_flags = SM_FLAGS_BEGIN;\n \n-\t/* use this port as agregator */\n+\t/* use this port as aggregator */\n \tport->aggregator_port_id = slave_id;\n \n \tif (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {\ndiff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h\nindex 11a71a55..7eb392f8 100644\n--- a/drivers/net/bonding/rte_eth_bond_8023ad.h\n+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h\n@@ -68,7 +68,7 @@ struct port_params {\n \tstruct rte_ether_addr system;\n \t/**< System ID - Slave MAC address, same as bonding MAC address */\n \tuint16_t key;\n-\t/**< Speed information (implementation dependednt) and duplex. */\n+\t/**< Speed information (implementation dependent) and duplex. 
*/\n \tuint16_t port_priority;\n \t/**< Priority of this (unused in current implementation) */\n \tuint16_t port_number;\n@@ -317,7 +317,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);\n  * @param port_id Bonding device id\n  *\n  * @return\n- *   agregator mode on success, negative value otherwise\n+ *   aggregator mode on success, negative value otherwise\n  */\n int\n rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);\ndiff --git a/drivers/net/bonding/rte_eth_bond_alb.h b/drivers/net/bonding/rte_eth_bond_alb.h\nindex 386e70c5..4e9aeda9 100644\n--- a/drivers/net/bonding/rte_eth_bond_alb.h\n+++ b/drivers/net/bonding/rte_eth_bond_alb.h\n@@ -96,7 +96,7 @@ bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset,\n  * @param internals\t\tBonding data.\n  *\n  * @return\n- * Index of slawe on which packet should be sent.\n+ * Index of slave on which packet should be sent.\n  */\n uint16_t\n bond_mode_alb_arp_upd(struct client_data *client_info,\ndiff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c\nindex 84943cff..2d5cac6c 100644\n--- a/drivers/net/bonding/rte_eth_bond_api.c\n+++ b/drivers/net/bonding/rte_eth_bond_api.c\n@@ -375,7 +375,7 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,\n \t * value. Thus, the new internal value of default Rx queue offloads\n \t * has to be masked by rx_queue_offload_capa to make sure that only\n \t * commonly supported offloads are preserved from both the previous\n-\t * value and the value being inhereted from the new slave device.\n+\t * value and the value being inherited from the new slave device.\n \t */\n \trxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &\n \t\t\t     internals->rx_queue_offload_capa;\n@@ -413,7 +413,7 @@ eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,\n \t * value. 
Thus, the new internal value of default Tx queue offloads\n \t * has to be masked by tx_queue_offload_capa to make sure that only\n \t * commonly supported offloads are preserved from both the previous\n-\t * value and the value being inhereted from the new slave device.\n+\t * value and the value being inherited from the new slave device.\n \t */\n \ttxconf_i->offloads = (txconf_i->offloads | txconf->offloads) &\n \t\t\t     internals->tx_queue_offload_capa;\ndiff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h\nindex c2a46ad7..0982158c 100644\n--- a/drivers/net/cnxk/cn10k_ethdev.h\n+++ b/drivers/net/cnxk/cn10k_ethdev.h\n@@ -53,7 +53,7 @@ struct cn10k_outb_priv_data {\n \tvoid *userdata;\n \t/* Rlen computation data */\n \tstruct cnxk_ipsec_outb_rlens rlens;\n-\t/* Back pinter to eth sec session */\n+\t/* Back pointer to eth sec session */\n \tstruct cnxk_eth_sec_sess *eth_sec;\n \t/* SA index */\n \tuint32_t sa_idx;\ndiff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h\nindex 873e1871..c7d442fc 100644\n--- a/drivers/net/cnxk/cn10k_tx.h\n+++ b/drivers/net/cnxk/cn10k_tx.h\n@@ -736,7 +736,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,\n \t\t\t/* Retrieving the default desc values */\n \t\t\tlmt[off] = cmd[2];\n \n-\t\t\t/* Using compiler barier to avoid voilation of C\n+\t\t\t/* Using compiler barrier to avoid violation of C\n \t\t\t * aliasing rules.\n \t\t\t */\n \t\t\trte_compiler_barrier();\n@@ -745,7 +745,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,\n \t\t/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp\n \t\t * should not be recorded, hence changing the alg type to\n \t\t * NIX_SENDMEMALG_SET and also changing send mem addr field to\n-\t\t * next 8 bytes as it corrpt the actual tx tstamp registered\n+\t\t * next 8 bytes as it corrupts the actual tx tstamp registered\n \t\t * address.\n \t\t */\n \t\tsend_mem->w0.subdc = NIX_SUBDC_MEM;\n@@ -2254,7 +2254,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t}\n \n \t\tif (flags & NIX_TX_OFFLOAD_TSTAMP_F) {\n-\t\t\t/* Tx ol_flag for timestam. */\n+\t\t\t/* Tx ol_flag for timestamp. */\n \t\t\tconst uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,\n \t\t\t\t\t\tRTE_MBUF_F_TX_IEEE1588_TMST};\n \t\t\t/* Set send mem alg to SUB. 
*/\ndiff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h\nindex 435dde13..f5e5e555 100644\n--- a/drivers/net/cnxk/cn9k_tx.h\n+++ b/drivers/net/cnxk/cn9k_tx.h\n@@ -304,7 +304,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,\n \t\t\t/* Retrieving the default desc values */\n \t\t\tcmd[off] = send_mem_desc[6];\n \n-\t\t\t/* Using compiler barier to avoid voilation of C\n+\t\t\t/* Using compiler barrier to avoid violation of C\n \t\t\t * aliasing rules.\n \t\t\t */\n \t\t\trte_compiler_barrier();\n@@ -313,7 +313,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,\n \t\t/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp\n \t\t * should not be recorded, hence changing the alg type to\n \t\t * NIX_SENDMEMALG_SET and also changing send mem addr field to\n-\t\t * next 8 bytes as it corrpt the actual tx tstamp registered\n+\t\t * next 8 bytes as it corrupts the actual tx tstamp registered\n \t\t * address.\n \t\t */\n \t\tsend_mem->w0.cn9k.alg =\n@@ -1531,7 +1531,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t}\n \n \t\tif (flags & NIX_TX_OFFLOAD_TSTAMP_F) {\n-\t\t\t/* Tx ol_flag for timestam. */\n+\t\t\t/* Tx ol_flag for timestamp. */\n \t\t\tconst uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,\n \t\t\t\t\t\tRTE_MBUF_F_TX_IEEE1588_TMST};\n \t\t\t/* Set send mem alg to SUB. */\ndiff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c\nindex 139fea25..359f9a30 100644\n--- a/drivers/net/cnxk/cnxk_ptp.c\n+++ b/drivers/net/cnxk/cnxk_ptp.c\n@@ -12,7 +12,7 @@ cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)\n \t/* This API returns the raw PTP HI clock value. Since LFs do not\n \t * have direct access to PTP registers and it requires mbox msg\n \t * to AF for this value. 
In fastpath reading this value for every\n-\t * packet (which involes mbox call) becomes very expensive, hence\n+\t * packet (which involves mbox call) becomes very expensive, hence\n \t * we should be able to derive PTP HI clock value from tsc by\n \t * using freq_mult and clk_delta calculated during configure stage.\n \t */\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c\nindex edcbba9d..6e460dfe 100644\n--- a/drivers/net/cxgbe/cxgbe_flow.c\n+++ b/drivers/net/cxgbe/cxgbe_flow.c\n@@ -1378,7 +1378,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,\n }\n \n /*\n- * @ret : > 0 filter destroyed succsesfully\n+ * @ret : > 0 filter destroyed successfully\n  *        < 0 error destroying filter\n  *        == 1 filter not active / not found\n  */\ndiff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c\nindex f639612a..d0c93f8a 100644\n--- a/drivers/net/cxgbe/cxgbevf_main.c\n+++ b/drivers/net/cxgbe/cxgbevf_main.c\n@@ -44,7 +44,7 @@ static void size_nports_qsets(struct adapter *adapter)\n \t */\n \tpmask_nports = hweight32(adapter->params.vfres.pmask);\n \tif (pmask_nports < adapter->params.nports) {\n-\t\tdev_warn(adapter->pdev_dev, \"only using %d of %d provissioned\"\n+\t\tdev_warn(adapter->pdev_dev, \"only using %d of %d provisioned\"\n \t\t\t \" virtual interfaces; limited by Port Access Rights\"\n \t\t\t \" mask %#x\\n\", pmask_nports, adapter->params.nports,\n \t\t\t adapter->params.vfres.pmask);\ndiff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c\nindex f623f3e6..1c76b8e4 100644\n--- a/drivers/net/cxgbe/sge.c\n+++ b/drivers/net/cxgbe/sge.c\n@@ -211,7 +211,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)\n  * @fl: the Free List\n  *\n  * Tests specified Free List to see whether the number of buffers\n- * available to the hardware has falled below our \"starvation\"\n+ * available to the hardware has fallen below our \"starvation\"\n  * threshold.\n  */\n static inline bool fl_starving(const struct adapter *adapter,\n@@ -678,7 +678,7 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,\n  * @q: the Tx queue\n  * @n: number of new descriptors to give to HW\n  *\n- * Ring the doorbel for a Tx queue.\n+ * Ring the doorbell for a Tx queue.\n  */\n static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)\n {\n@@ -877,7 +877,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,\n }\n \n /**\n- * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not\n+ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not\n  * @txq: tx queue where the mbuf is sent\n  * @mbuf: mbuf to be sent\n  * @nflits: return value for number of flits needed\n@@ -1846,7 +1846,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t\t * for its status page) along with the associated software\n \t\t * descriptor ring.  
The free list size needs to be a multiple\n \t\t * of the Egress Queue Unit and at least 2 Egress Units larger\n-\t\t * than the SGE's Egress Congrestion Threshold\n+\t\t * than the SGE's Egress Congestion Threshold\n \t\t * (fl_starve_thres - 1).\n \t\t */\n \t\tif (fl->size < s->fl_starve_thres - 1 + 2 * 8)\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c\nindex e49f7654..2c2c4e4e 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.c\n+++ b/drivers/net/dpaa/dpaa_ethdev.c\n@@ -1030,7 +1030,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t   QM_FQCTRL_CTXASTASHING |\n \t\t\t\t   QM_FQCTRL_PREFERINCACHE;\n \t\topts.fqd.context_a.stashing.exclusive = 0;\n-\t\t/* In muticore scenario stashing becomes a bottleneck on LS1046.\n+\t\t/* In multicore scenario stashing becomes a bottleneck on LS1046.\n \t\t * So do not enable stashing in this case\n \t\t */\n \t\tif (dpaa_svr_family != SVR_LS1046A_FAMILY)\n@@ -1866,7 +1866,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \n \tdpaa_intf->name = dpaa_device->name;\n \n-\t/* save fman_if & cfg in the interface struture */\n+\t/* save fman_if & cfg in the interface structure */\n \teth_dev->process_private = fman_intf;\n \tdpaa_intf->ifid = dev_id;\n \tdpaa_intf->cfg = cfg;\n@@ -2169,7 +2169,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,\n \t\tif (dpaa_svr_family == SVR_LS1043A_FAMILY)\n \t\t\tdpaa_push_mode_max_queue = 0;\n \n-\t\t/* if push mode queues to be enabled. Currenly we are allowing\n+\t\t/* if push mode queues to be enabled. Currently we are allowing\n \t\t * only one queue per thread.\n \t\t */\n \t\tif (getenv(\"DPAA_PUSH_QUEUES_NUMBER\")) {\ndiff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c\nindex ffac6ce3..956fe946 100644\n--- a/drivers/net/dpaa/dpaa_rxtx.c\n+++ b/drivers/net/dpaa/dpaa_rxtx.c\n@@ -600,8 +600,8 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)\n \tvoid *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));\n \n \t/* In case of LS1046, annotation stashing is disabled due to L2 cache\n-\t * being bottleneck in case of multicore scanario for this platform.\n-\t * So we prefetch the annoation beforehand, so that it is available\n+\t * being bottleneck in case of multicore scenario for this platform.\n+\t * So we prefetch the annotation beforehand, so that it is available\n \t * in cache when accessed.\n \t */\n \trte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));\ndiff --git a/drivers/net/dpaa/fmlib/fm_ext.h b/drivers/net/dpaa/fmlib/fm_ext.h\nindex 27c9fb47..8e7153bd 100644\n--- a/drivers/net/dpaa/fmlib/fm_ext.h\n+++ b/drivers/net/dpaa/fmlib/fm_ext.h\n@@ -176,7 +176,7 @@ typedef struct t_fm_prs_result {\n #define FM_FD_ERR_PRS_HDR_ERR\t0x00000020\n \t\t/**< Header error was identified during parsing */\n #define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008\n-\t\t\t/**< Frame parsed beyind 256 first bytes */\n+\t\t\t/**< Frame parsed beyond 256 first bytes */\n \n #define FM_FD_TX_STATUS_ERR_MASK\t(FM_FD_ERR_UNSUPPORTED_FORMAT   | \\\n \t\t\t\t\tFM_FD_ERR_LENGTH\t\t| \\\ndiff --git a/drivers/net/dpaa/fmlib/fm_pcd_ext.h b/drivers/net/dpaa/fmlib/fm_pcd_ext.h\nindex 8be3885f..3802b429 100644\n--- a/drivers/net/dpaa/fmlib/fm_pcd_ext.h\n+++ b/drivers/net/dpaa/fmlib/fm_pcd_ext.h\n@@ -276,7 +276,7 @@ typedef struct ioc_fm_pcd_counters_params_t {\n } ioc_fm_pcd_counters_params_t;\n \n /*\n- * @Description   structure for FM exception definitios\n+ * @Description   structure for FM exception definitions\n  */\n typedef struct 
ioc_fm_pcd_exception_params_t {\n \tioc_fm_pcd_exceptions exception;\t/**< The requested exception */\n@@ -883,7 +883,7 @@ typedef enum ioc_fm_pcd_manip_hdr_rmv_specific_l2 {\n \te_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET,\t/**< Ethernet/802.3 MAC */\n \te_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS,\t/**< stacked QTags */\n \te_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS,\n-\t\t\t/**< MPLS and Ethernet/802.3 MAC header unitl the header\n+\t\t\t/**< MPLS and Ethernet/802.3 MAC header until the header\n \t\t\t * which follows the MPLS header\n \t\t\t */\n \te_IOC_FM_PCD_MANIP_HDR_RMV_MPLS\n@@ -3293,7 +3293,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {\n /*\n  * @Function\t  fm_pcd_net_env_characteristics_delete\n  *\n- * @Description   Deletes a set of Network Environment Charecteristics.\n+ * @Description   Deletes a set of Network Environment Characteristics.\n  *\n  * @Param[in]\t  ioc_fm_obj_t\t\tThe id of a Network Environment object.\n  *\n@@ -3493,7 +3493,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {\n  * @Return\t  0 on success; Error code otherwise.\n  *\n  * @Cautions\t  Allowed only following fm_pcd_match_table_set() not only of\n- *\t\t  the relevnt node but also the node that points to this node.\n+ *\t\t  the relevant node but also the node that points to this node.\n  */\n #define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE \\\n \t\t_IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), \\\ndiff --git a/drivers/net/dpaa/fmlib/fm_port_ext.h b/drivers/net/dpaa/fmlib/fm_port_ext.h\nindex 6f5479fb..abdec961 100644\n--- a/drivers/net/dpaa/fmlib/fm_port_ext.h\n+++ b/drivers/net/dpaa/fmlib/fm_port_ext.h\n@@ -177,7 +177,7 @@ typedef enum ioc_fm_port_counters {\n \t\t\t\t/**< BMI OP & HC only statistics counter */\n \te_IOC_FM_PORT_COUNTERS_LENGTH_ERR,\n \t\t\t\t/**< BMI non-Rx statistics counter */\n-\te_IOC_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT,\n+\te_IOC_FM_PORT_COUNTERS_UNSUPPORTED_FORMAT,\n \t\t\t\t/**< BMI non-Rx statistics counter */\n \te_IOC_FM_PORT_COUNTERS_DEQ_TOTAL,/**< QMI total QM dequeues counter */\n \te_IOC_FM_PORT_COUNTERS_ENQ_TOTAL,/**< QMI total QM enqueues counter */\n@@ -498,7 +498,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {\n \t\t/**< Number of bytes from beginning of packet to start parsing\n \t\t */\n \tioc_net_header_type\tfirst_prs_hdr;\n-\t\t/**< The type of the first header axpected at 'parsing_offset'\n+\t\t/**< The type of the first header expected at 'parsing_offset'\n \t\t */\n \tbool\t\tinclude_in_prs_statistics;\n \t\t/**< TRUE to include this port in the parser statistics */\n@@ -524,7 +524,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {\n } ioc_fm_port_pcd_prs_params_t;\n \n /*\n- * @Description   A structure for defining coarse alassification parameters\n+ * @Description   A structure for defining coarse classification parameters\n  *\t\t  (Must match t_fm_portPcdCcParams defined in fm_port_ext.h)\n  */\n typedef struct ioc_fm_port_pcd_cc_params_t {\n@@ -602,7 +602,7 @@ typedef struct ioc_fm_pcd_prs_start_t {\n \t\t/**< Number of bytes from beginning of packet to start parsing\n \t\t */\n \tioc_net_header_type first_prs_hdr;\n-\t\t/**< The type of the first header axpected at 'parsing_offset'\n+\t\t/**< The type of the first header expected at 'parsing_offset'\n \t\t */\n } ioc_fm_pcd_prs_start_t;\n \n@@ -1356,7 +1356,7 @@ typedef uint32_t\tfm_port_frame_err_select_t;\n #define FM_PORT_FRM_ERR_PRS_HDR_ERR\tFM_FD_ERR_PRS_HDR_ERR\n \t\t\t/**< Header error was identified during parsing */\n #define 
FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED\tFM_FD_ERR_BLOCK_LIMIT_EXCEEDED\n-\t\t\t/**< Frame parsed beyind 256 first bytes */\n+\t\t\t/**< Frame parsed beyond 256 first bytes */\n #define FM_PORT_FRM_ERR_PROCESS_TIMEOUT\t0x00000001\n \t\t\t/**< FPM Frame Processing Timeout Exceeded */\n /* @} */\n@@ -1390,7 +1390,7 @@ typedef void (t_fm_port_exception_callback) (t_handle h_app,\n  * @Param[in]\t  length\tlength of received data\n  * @Param[in]\t  status\treceive status and errors\n  * @Param[in]\t  position\tposition of buffer in frame\n- * @Param[in]\t  h_buf_context\tA handle of the user acossiated with this buffer\n+ * @Param[in]\t  h_buf_context\tA handle of the user associated with this buffer\n  *\n  * @Retval\t  e_RX_STORE_RESPONSE_CONTINUE\n  *\t\t  order the driver to continue Rx operation for all ready data.\n@@ -1414,7 +1414,7 @@ typedef e_rx_store_response(t_fm_port_im_rx_store_callback) (t_handle h_app,\n  * @Param[in]\t  p_data\tA pointer to data received\n  * @Param[in]\t  status\ttransmit status and errors\n  * @Param[in]\t  last_buffer\tis last buffer in frame\n- * @Param[in]\t  h_buf_context\tA handle of the user acossiated with this buffer\n+ * @Param[in]\t  h_buf_context\tA handle of the user associated with this buffer\n  */\n typedef void (t_fm_port_im_tx_conf_callback) (t_handle   h_app,\n \t\t\t\tuint8_t\t*p_data,\n@@ -2538,7 +2538,7 @@ typedef enum e_fm_port_counters {\n \t\t\t/**< BMI OP & HC only statistics counter */\n \te_FM_PORT_COUNTERS_LENGTH_ERR,\n \t\t\t/**< BMI non-Rx statistics counter */\n-\te_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT,\n+\te_FM_PORT_COUNTERS_UNSUPPORTED_FORMAT,\n \t\t\t/**< BMI non-Rx statistics counter */\n \te_FM_PORT_COUNTERS_DEQ_TOTAL,\t/**< QMI total QM dequeues counter */\n \te_FM_PORT_COUNTERS_ENQ_TOTAL,\t/**< QMI total QM enqueues counter */\n@@ -2585,7 +2585,7 @@ typedef struct t_fm_port_congestion_grps {\n \tbool\tpfc_prio_enable[FM_NUM_CONG_GRPS][FM_MAX_PFC_PRIO];\n \t\t\t/**< a matrix that represents the map between the CG ids\n \t\t\t * defined in 'congestion_grps_to_consider' to the\n-\t\t\t * priorties mapping array.\n+\t\t\t * priorities mapping array.\n \t\t\t */\n } t_fm_port_congestion_grps;\n \ndiff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c\nindex a3706439..f561dcc1 100644\n--- a/drivers/net/dpaa2/dpaa2_ethdev.c\n+++ b/drivers/net/dpaa2/dpaa2_ethdev.c\n@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)\n \tPMD_INIT_FUNC_TRACE();\n \n \tif (mask & RTE_ETH_VLAN_FILTER_MASK) {\n-\t\t/* VLAN Filter not avaialble */\n+\t\t/* VLAN Filter not available */\n \t\tif (!priv->max_vlan_filters) {\n \t\t\tDPAA2_PMD_INFO(\"VLAN filter not available\");\n \t\t\treturn -ENOTSUP;\n@@ -916,7 +916,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\tcong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;\n \t\tcong_notif_cfg.threshold_entry = nb_tx_desc;\n \t\t/* Notify that the queue is not congested when the data in\n-\t\t * the queue is below this thershold.(90% of value)\n+\t\t * the queue is below this threshold.(90% of value)\n \t\t */\n \t\tcong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;\n \t\tcong_notif_cfg.message_ctx = 0;\n@@ -1058,7 +1058,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)\n  * Dpaa2 link Interrupt handler\n  *\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -2236,7 +2236,7 @@ int 
dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,\n \t\tocfg.oa = 1;\n \t\t/* Late arrival window size disabled */\n \t\tocfg.olws = 0;\n-\t\t/* ORL resource exhaustaion advance NESN disabled */\n+\t\t/* ORL resource exhaustion advance NESN disabled */\n \t\tocfg.oeane = 0;\n \t\t/* Loose ordering enabled */\n \t\tocfg.oloe = 1;\n@@ -2720,13 +2720,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \teth_dev->tx_pkt_burst = dpaa2_dev_tx;\n \n-\t/*Init fields w.r.t. classficaition*/\n+\t/*Init fields w.r.t. classification*/\n \tmemset(&priv->extract.qos_key_extract, 0,\n \t\tsizeof(struct dpaa2_key_extract));\n \tpriv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);\n \tif (!priv->extract.qos_extract_param) {\n \t\tDPAA2_PMD_ERR(\" Error(%d) in allocation resources for flow \"\n-\t\t\t    \" classificaiton \", ret);\n+\t\t\t    \" classification \", ret);\n \t\tgoto init_err;\n \t}\n \tpriv->extract.qos_key_extract.key_info.ipv4_src_offset =\n@@ -2744,7 +2744,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)\n \t\tpriv->extract.tc_extract_param[i] =\n \t\t\t(size_t)rte_malloc(NULL, 256, 64);\n \t\tif (!priv->extract.tc_extract_param[i]) {\n-\t\t\tDPAA2_PMD_ERR(\" Error(%d) in allocation resources for flow classificaiton\",\n+\t\t\tDPAA2_PMD_ERR(\" Error(%d) in allocation resources for flow classification\",\n \t\t\t\t     ret);\n \t\t\tgoto init_err;\n \t\t}\ndiff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h\nindex c5e9267b..1c5569d0 100644\n--- a/drivers/net/dpaa2/dpaa2_ethdev.h\n+++ b/drivers/net/dpaa2/dpaa2_ethdev.h\n@@ -117,7 +117,7 @@ extern int dpaa2_timestamp_dynfield_offset;\n \n #define DPAA2_FLOW_MAX_KEY_SIZE\t\t16\n \n-/*Externaly defined*/\n+/*Externally defined*/\n extern const struct rte_flow_ops dpaa2_flow_ops;\n \n extern const struct rte_tm_ops dpaa2_tm_ops;\ndiff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c\nindex 84fe37a7..8a14eb95 100644\n--- a/drivers/net/dpaa2/dpaa2_flow.c\n+++ b/drivers/net/dpaa2/dpaa2_flow.c\n@@ -1341,7 +1341,7 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,\n }\n \n static int\n-dpaa2_configure_flow_ip_discrimation(\n+dpaa2_configure_flow_ip_discrimination(\n \tstruct dpaa2_dev_priv *priv, struct rte_flow *flow,\n \tconst struct rte_flow_item *pattern,\n \tint *local_cfg,\tint *device_configured,\n@@ -1447,11 +1447,11 @@ dpaa2_configure_flow_generic_ip(\n \tflow->tc_id = group;\n \tflow->tc_index = attr->priority;\n \n-\tret = dpaa2_configure_flow_ip_discrimation(priv,\n+\tret = dpaa2_configure_flow_ip_discrimination(priv,\n \t\t\tflow, pattern, &local_cfg,\n \t\t\tdevice_configured, group);\n \tif (ret) {\n-\t\tDPAA2_PMD_ERR(\"IP discrimation failed!\");\n+\t\tDPAA2_PMD_ERR(\"IP discrimination failed!\");\n \t\treturn -1;\n \t}\n \n@@ -3349,7 +3349,7 @@ dpaa2_flow_verify_action(\n \t\t\t\t\t(actions[j].conf);\n \t\t\tif (rss_conf->queue_num > priv->dist_queues) {\n \t\t\t\tDPAA2_PMD_ERR(\n-\t\t\t\t\t\"RSS number exceeds the distrbution size\");\n+\t\t\t\t\t\"RSS number exceeds the distribution size\");\n \t\t\t\treturn -ENOTSUP;\n \t\t\t}\n \t\t\tfor (i = 0; i < (int)rss_conf->queue_num; i++) {\n@@ -3596,7 +3596,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,\n \t\t\t\tqos_cfg.keep_entries = true;\n \t\t\t\tqos_cfg.key_cfg_iova =\n \t\t\t\t\t(size_t)priv->extract.qos_extract_param;\n-\t\t\t\t/* QoS table is effecitive for multiple TCs.*/\n+\t\t\t\t/* QoS table is effective for multiple TCs.*/\n \t\t\t\tif (priv->num_rx_tc > 1) {\n \t\t\t\t\tret = 
dpni_set_qos_table(dpni, CMD_PRI_LOW,\n \t\t\t\t\t\tpriv->token, &qos_cfg);\n@@ -3655,7 +3655,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,\n \t\t\t\t\t\t0, 0);\n \t\t\t\tif (ret < 0) {\n \t\t\t\t\tDPAA2_PMD_ERR(\n-\t\t\t\t\t\t\"Error in addnig entry to QoS table(%d)\", ret);\n+\t\t\t\t\t\t\"Error in adding entry to QoS table(%d)\", ret);\n \t\t\t\t\treturn ret;\n \t\t\t\t}\n \t\t\t}\ndiff --git a/drivers/net/dpaa2/dpaa2_mux.c b/drivers/net/dpaa2/dpaa2_mux.c\nindex d347f4df..f54ab5df 100644\n--- a/drivers/net/dpaa2/dpaa2_mux.c\n+++ b/drivers/net/dpaa2/dpaa2_mux.c\n@@ -95,7 +95,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,\n \tmask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);\n \n \t/* Currently taking only IP protocol as an extract type.\n-\t * This can be exended to other fields using pattern->type.\n+\t * This can be extended to other fields using pattern->type.\n \t */\n \tmemset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));\n \n@@ -311,11 +311,11 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,\n \t\tgoto init_err;\n \t}\n \n-\t/* The new dpdmux_set/get_resetable() API are available starting with\n+\t/* The new dpdmux_set/get_resettable() API are available starting with\n \t * DPDMUX_VER_MAJOR==6 and DPDMUX_VER_MINOR==6\n \t */\n \tif (maj_ver >= 6 && min_ver >= 6) {\n-\t\tret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,\n+\t\tret = dpdmux_set_resettable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,\n \t\t\t\tdpdmux_dev->token,\n \t\t\t\tDPDMUX_SKIP_DEFAULT_INTERFACE |\n \t\t\t\tDPDMUX_SKIP_UNICAST_RULES |\ndiff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c\nindex c65589a5..90b971b4 100644\n--- a/drivers/net/dpaa2/dpaa2_rxtx.c\n+++ b/drivers/net/dpaa2/dpaa2_rxtx.c\n@@ -714,7 +714,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \trte_prefetch0((void *)(size_t)(dq_storage + 1));\n \n \t/* Prepare next pull descriptor. This will give space for the\n-\t * prefething done on DQRR entries\n+\t * prefetching done on DQRR entries\n \t */\n \tq_storage->toggle ^= 1;\n \tdq_storage1 = q_storage->dq_storage[q_storage->toggle];\n@@ -1510,7 +1510,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\tif (*dpaa2_seqn(*bufs)) {\n \t\t\t\t/* Use only queue 0 for Tx in case of atomic/\n \t\t\t\t * ordered packets as packets can get unordered\n-\t\t\t\t * when being tranmitted out from the interface\n+\t\t\t\t * when being transmitted out from the interface\n \t\t\t\t */\n \t\t\t\tdpaa2_set_enqueue_descriptor(order_sendq,\n \t\t\t\t\t\t\t     (*bufs),\n@@ -1738,7 +1738,7 @@ dpaa2_dev_loopback_rx(void *queue,\n \trte_prefetch0((void *)(size_t)(dq_storage + 1));\n \n \t/* Prepare next pull descriptor. 
This will give space for the\n-\t * prefething done on DQRR entries\n+\t * prefetching done on DQRR entries\n \t */\n \tq_storage->toggle ^= 1;\n \tdq_storage1 = q_storage->dq_storage[q_storage->toggle];\ndiff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c\nindex edbb01b4..693557e1 100644\n--- a/drivers/net/dpaa2/mc/dpdmux.c\n+++ b/drivers/net/dpaa2/mc/dpdmux.c\n@@ -281,7 +281,7 @@ int dpdmux_reset(struct fsl_mc_io *mc_io,\n }\n \n /**\n- * dpdmux_set_resetable() - Set overall resetable DPDMUX parameters.\n+ * dpdmux_set_resettable() - Set overall resettable DPDMUX parameters.\n  * @mc_io:\tPointer to MC portal's I/O object\n  * @cmd_flags:\tCommand flags; one or more of 'MC_CMD_FLAG_'\n  * @token:\tToken of DPDMUX object\n@@ -299,7 +299,7 @@ int dpdmux_reset(struct fsl_mc_io *mc_io,\n  *\n  * Return:\t'0' on Success; Error code otherwise.\n  */\n-int dpdmux_set_resetable(struct fsl_mc_io *mc_io,\n+int dpdmux_set_resettable(struct fsl_mc_io *mc_io,\n \t\t\t\t  uint32_t cmd_flags,\n \t\t\t\t  uint16_t token,\n \t\t\t\t  uint8_t skip_reset_flags)\n@@ -321,7 +321,7 @@ int dpdmux_set_resetable(struct fsl_mc_io *mc_io,\n }\n \n /**\n- * dpdmux_get_resetable() - Get overall resetable parameters.\n+ * dpdmux_get_resettable() - Get overall resettable parameters.\n  * @mc_io:\tPointer to MC portal's I/O object\n  * @cmd_flags:\tCommand flags; one or more of 'MC_CMD_FLAG_'\n  * @token:\tToken of DPDMUX object\n@@ -334,7 +334,7 @@ int dpdmux_set_resetable(struct fsl_mc_io *mc_io,\n  *\n  * Return:\t'0' on Success; Error code otherwise.\n  */\n-int dpdmux_get_resetable(struct fsl_mc_io *mc_io,\n+int dpdmux_get_resettable(struct fsl_mc_io *mc_io,\n \t\t\t\t  uint32_t cmd_flags,\n \t\t\t\t  uint16_t token,\n \t\t\t\t  uint8_t *skip_reset_flags)\ndiff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h\nindex b01a98eb..274dcffc 100644\n--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h\n+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h\n@@ -155,12 +155,12 @@ int dpdmux_reset(struct fsl_mc_io *mc_io,\n  */\n #define DPDMUX_SKIP_MULTICAST_RULES\t0x04\n \n-int dpdmux_set_resetable(struct fsl_mc_io *mc_io,\n+int dpdmux_set_resettable(struct fsl_mc_io *mc_io,\n \t\t\t\t  uint32_t cmd_flags,\n \t\t\t\t  uint16_t token,\n \t\t\t\t  uint8_t skip_reset_flags);\n \n-int dpdmux_get_resetable(struct fsl_mc_io *mc_io,\n+int dpdmux_get_resettable(struct fsl_mc_io *mc_io,\n \t\t\t\t  uint32_t cmd_flags,\n \t\t\t\t  uint16_t token,\n \t\t\t\t  uint8_t *skip_reset_flags);\ndiff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h\nindex 469ab9b3..3b9bffee 100644\n--- a/drivers/net/dpaa2/mc/fsl_dpni.h\n+++ b/drivers/net/dpaa2/mc/fsl_dpni.h\n@@ -93,7 +93,7 @@ struct fsl_mc_io;\n  */\n #define DPNI_OPT_OPR_PER_TC\t\t\t\t0x000080\n /**\n- * All Tx traffic classes will use a single sender (ignore num_queueus for tx)\n+ * All Tx traffic classes will use a single sender (ignore num_queues for tx)\n  */\n #define DPNI_OPT_SINGLE_SENDER\t\t\t0x000100\n /**\n@@ -617,7 +617,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,\n  * @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all\n  *\tframes whose enqueue was rejected\n  * @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected\n- * @page_4: congestion point drops for seleted TC\n+ * @page_4: congestion point drops for selected TC\n  * @page_4.cgr_reject_frames: number of rejected frames due to congestion point\n  * @page_4.cgr_reject_bytes: number of rejected bytes due to 
congestion point\n  * @page_5: policer statistics per TC\n@@ -1417,7 +1417,7 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,\n  *\t\tdpkg_prepare_key_cfg()\n  * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);\n  *\t\t'0' to use the 'default_tc' in such cases\n- * @keep_entries: if set to one will not delele existing table entries. This\n+ * @keep_entries: if set to one will not delete existing table entries. This\n  *\t\toption will work properly only for dpni objects created with\n  *\t\tDPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must\n  *\t\tbe compatible with new key composition rule.\n@@ -1516,7 +1516,7 @@ int dpni_clear_qos_table(struct fsl_mc_io *mc_io,\n  * @flow_id: Identifies the Rx queue used for matching traffic.  Supported\n  *     values are in range 0 to num_queue-1.\n  * @redirect_obj_token: token that identifies the object where frame is\n- * redirected when this rule is hit. This paraneter is used only when one of the\n+ * redirected when this rule is hit. This parameter is used only when one of the\n  * flags DPNI_FS_OPT_REDIRECT_TO_DPNI_RX or DPNI_FS_OPT_REDIRECT_TO_DPNI_TX is\n  * set.\n  * The token is obtained using dpni_open() API call. The object must stay\n@@ -1797,7 +1797,7 @@ int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,\n \t\t  struct dpni_load_ss_cfg *cfg);\n \n /**\n- * dpni_eanble_sw_sequence() - Enables a software sequence in the parser\n+ * dpni_enable_sw_sequence() - Enables a software sequence in the parser\n  *\t\t\t\tprofile\n  * corresponding to the ingress or egress of the DPNI.\n  * @mc_io:\tPointer to MC portal's I/O object\ndiff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h\nindex a548ae2c..718a9746 100644\n--- a/drivers/net/e1000/e1000_ethdev.h\n+++ b/drivers/net/e1000/e1000_ethdev.h\n@@ -103,7 +103,7 @@\n  * Maximum number of Ring Descriptors.\n  *\n  * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring\n- * desscriptors should meet the following condition:\n+ * descriptors should meet the following condition:\n  * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0\n  */\n #define\tE1000_MIN_RING_DESC\t32\n@@ -252,7 +252,7 @@ struct igb_rte_flow_rss_conf {\n };\n \n /*\n- * Structure to store filters'info.\n+ * Structure to store filters' info.\n  */\n struct e1000_filter_info {\n \tuint8_t ethertype_mask; /* Bit mask for every used ethertype filter */\ndiff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c\nindex 31c48700..794496ab 100644\n--- a/drivers/net/e1000/em_ethdev.c\n+++ b/drivers/net/e1000/em_ethdev.c\n@@ -1058,8 +1058,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \t/*\n \t * Starting with 631xESB hw supports 2 TX/RX queues per port.\n-\t * Unfortunatelly, all these nics have just one TX context.\n-\t * So we have few choises for TX:\n+\t * Unfortunately, all these nics have just one TX context.\n+\t * So we have few choices for TX:\n \t * - Use just one TX queue.\n \t * - Allow cksum offload only for one TX queue.\n \t * - Don't allow TX cksum offload at all.\n@@ -1068,7 +1068,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t * (Multiple Receive Queues are mutually exclusive with UDP\n \t * fragmentation and are not supported when a legacy receive\n \t * descriptor format is used).\n-\t * Which means separate RX routinies - as legacy nics (82540, 82545)\n+\t * Which means separate RX routines - as legacy nics (82540, 
82545)\n \t * don't support extended RXD.\n \t * To avoid it we support just one RX queue for now (no RSS).\n \t */\n@@ -1558,7 +1558,7 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)\n }\n \n /*\n- * It executes link_update after knowing an interrupt is prsent.\n+ * It executes link_update after knowing an interrupt is present.\n  *\n  * @param dev\n  *  Pointer to struct rte_eth_dev.\n@@ -1616,7 +1616,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex 39262502..cea5b490 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -141,7 +141,7 @@ union em_vlan_macip {\n struct em_ctx_info {\n \tuint64_t flags;              /**< ol_flags related to context build. */\n \tuint32_t cmp_mask;           /**< compare mask */\n-\tunion em_vlan_macip hdrlen;  /**< L2 and L3 header lenghts */\n+\tunion em_vlan_macip hdrlen;  /**< L2 and L3 header lengths */\n };\n \n /**\n@@ -829,7 +829,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n@@ -1074,7 +1074,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\ndiff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c\nindex 3ee16c15..4f865d18 100644\n--- a/drivers/net/e1000/igb_ethdev.c\n+++ b/drivers/net/e1000/igb_ethdev.c\n@@ -1149,7 +1149,7 @@ eth_igb_configure(struct rte_eth_dev *dev)\n \tif (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)\n \t\tdev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;\n \n-\t/* multipe queue mode checking */\n+\t/* multiple queue mode checking */\n \tret  = igb_check_mq_mode(dev);\n \tif (ret != 0) {\n \t\tPMD_DRV_LOG(ERR, \"igb_check_mq_mode fails with %d.\",\n@@ -1265,7 +1265,7 @@ eth_igb_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n-\t/* confiugre msix for rx interrupt */\n+\t/* configure msix for rx interrupt */\n \teth_igb_configure_msix_intr(dev);\n \n \t/* Configure for OS presence */\n@@ -2819,7 +2819,7 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)\n }\n \n /*\n- * It executes link_update after knowing an interrupt is prsent.\n+ * It executes link_update after knowing an interrupt is present.\n  *\n  * @param dev\n  *  Pointer to struct rte_eth_dev.\n@@ -2889,7 +2889,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address 
of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -3787,7 +3787,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,\n  *\n  * @param\n  * dev: Pointer to struct rte_eth_dev.\n- * ntuple_filter: ponter to the filter that will be added.\n+ * ntuple_filter: pointer to the filter that will be added.\n  *\n  * @return\n  *    - On success, zero.\n@@ -3868,7 +3868,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev,\n  *\n  * @param\n  * dev: Pointer to struct rte_eth_dev.\n- * ntuple_filter: ponter to the filter that will be removed.\n+ * ntuple_filter: pointer to the filter that will be removed.\n  *\n  * @return\n  *    - On success, zero.\n@@ -4226,7 +4226,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,\n  *\n  * @param\n  * dev: Pointer to struct rte_eth_dev.\n- * ntuple_filter: ponter to the filter that will be added.\n+ * ntuple_filter: pointer to the filter that will be added.\n  *\n  * @return\n  *    - On success, zero.\n@@ -4313,7 +4313,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,\n  *\n  * @param\n  * dev: Pointer to struct rte_eth_dev.\n- * ntuple_filter: ponter to the filter that will be removed.\n+ * ntuple_filter: pointer to the filter that will be removed.\n  *\n  * @return\n  *    - On success, zero.\n@@ -4831,7 +4831,7 @@ igb_timesync_disable(struct rte_eth_dev *dev)\n \t/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \tE1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);\n \n-\t/* Stop incrementating the System Time registers. */\n+\t/* Stop incrementing the System Time registers. */\n \tE1000_WRITE_REG(hw, E1000_TIMINCA, 0);\n \n \treturn 0;\ndiff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c\nindex e72376f6..b7f9b942 100644\n--- a/drivers/net/e1000/igb_flow.c\n+++ b/drivers/net/e1000/igb_flow.c\n@@ -57,7 +57,7 @@ struct igb_flex_filter_list igb_filter_flex_list;\n struct igb_rss_filter_list igb_filter_rss_list;\n \n /**\n- * Please aware there's an asumption for all the parsers.\n+ * Please be aware there's an assumption for all the parsers.\n  * rte_flow_item is using big endian, rte_flow_attr and\n  * rte_flow_action are using CPU order.\n  * Because the pattern is used to describe the packets,\n@@ -1608,7 +1608,7 @@ igb_flow_create(struct rte_eth_dev *dev,\n \n /**\n  * Check if the flow rule is supported by igb.\n- * It only checkes the format. Don't guarantee the rule can be programmed into\n+ * It only checks the format. It doesn't guarantee the rule can be programmed into\n  * the HW. 
Because there can be no enough room for the rule.\n  */\n static int\ndiff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c\nindex fe355ef6..3f3fd0d6 100644\n--- a/drivers/net/e1000/igb_pf.c\n+++ b/drivers/net/e1000/igb_pf.c\n@@ -155,7 +155,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)\n \telse\n \t\tE1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);\n \n-\t/* clear VMDq map to perment rar 0 */\n+\t/* clear VMDq map to permanent rar 0 */\n \trah = E1000_READ_REG(hw, E1000_RAH(0));\n \trah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);\n \tE1000_WRITE_REG(hw, E1000_RAH(0), rah);\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex 4a311a7b..f32dee46 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -150,7 +150,7 @@ union igb_tx_offload {\n \t(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)\n \n /**\n- * Strucutre to check if new context need be built\n+ * Structure to check if new context need be built\n  */\n struct igb_advctx_info {\n \tuint64_t flags;           /**< ol_flags related to context build. */\n@@ -967,7 +967,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n@@ -1229,7 +1229,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n@@ -1252,7 +1252,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n  * Maximum number of Ring Descriptors.\n  *\n  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring\n- * desscriptors should meet the following condition:\n+ * descriptors should meet the following condition:\n  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0\n  */\n \n@@ -1350,7 +1350,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)\n \t\t\t\t\t\tsw_ring[tx_id].last_id = tx_id;\n \t\t\t\t\t}\n \n-\t\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\t\t/* Move to next segment. */\n \t\t\t\t\ttx_id = sw_ring[tx_id].next_id;\n \n \t\t\t\t} while (tx_id != tx_next);\n@@ -1383,7 +1383,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)\n \n \t\t\t/* Walk the list and find the next mbuf, if any. */\n \t\t\tdo {\n-\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\t/* Move to next segment. 
*/\n \t\t\t\ttx_id = sw_ring[tx_id].next_id;\n \n \t\t\t\tif (sw_ring[tx_id].mbuf)\n@@ -2146,7 +2146,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)\n \n \tigb_rss_disable(dev);\n \n-\t/* RCTL: eanble VLAN filter */\n+\t/* RCTL: enable VLAN filter */\n \trctl = E1000_READ_REG(hw, E1000_RCTL);\n \trctl |= E1000_RCTL_VFE;\n \tE1000_WRITE_REG(hw, E1000_RCTL, rctl);\ndiff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nindex 634c97ac..dce26cfa 100644\n--- a/drivers/net/ena/ena_ethdev.c\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -1408,7 +1408,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)\n \t\t++rxq->rx_stats.refill_partial;\n \t}\n \n-\t/* When we submitted free recources to device... */\n+\t/* When we submitted free resources to device... */\n \tif (likely(i > 0)) {\n \t\t/* ...let HW know that it can fill buffers with data. */\n \t\tena_com_write_sq_doorbell(rxq->ena_com_io_sq);\ndiff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h\nindex 865e1241..f99e4f39 100644\n--- a/drivers/net/ena/ena_ethdev.h\n+++ b/drivers/net/ena/ena_ethdev.h\n@@ -42,7 +42,7 @@\n \n /* While processing submitted and completed descriptors (rx and tx path\n  * respectively) in a loop it is desired to:\n- *  - perform batch submissions while populating sumbissmion queue\n+ *  - perform batch submissions while populating submission queue\n  *  - avoid blocking transmission of other packets during cleanup phase\n  * Hence the utilization ratio of 1/8 of a queue size or max value if the size\n  * of the ring is very big - like 8k Rx rings.\ndiff --git a/drivers/net/enetfec/enet_regs.h b/drivers/net/enetfec/enet_regs.h\nindex a300c6f8..c9400957 100644\n--- a/drivers/net/enetfec/enet_regs.h\n+++ b/drivers/net/enetfec/enet_regs.h\n@@ -12,7 +12,7 @@\n #define RX_BD_CR\t((ushort)0x0004) /* CRC or Frame error */\n #define RX_BD_SH\t((ushort)0x0008) /* Reserved */\n #define RX_BD_NO\t((ushort)0x0010) /* Rcvd non-octet aligned frame */\n-#define RX_BD_LG\t((ushort)0x0020) /* Rcvd frame length voilation */\n+#define RX_BD_LG\t((ushort)0x0020) /* Rcvd frame length violation */\n #define RX_BD_FIRST\t((ushort)0x0400) /* Reserved */\n #define RX_BD_LAST\t((ushort)0x0800) /* last buffer in the frame */\n #define RX_BD_INT\t0x00800000\ndiff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c\nindex 33147169..bc1dcf22 100644\n--- a/drivers/net/enic/enic_flow.c\n+++ b/drivers/net/enic/enic_flow.c\n@@ -21,7 +21,7 @@\n  * so we can easily add new arguments.\n  * item: Item specification.\n  * filter: Partially filled in NIC filter structure.\n- * inner_ofst: If zero, this is an outer header. If non-zero, this is\n+ * inner_offset: If zero, this is an outer header. 
If non-zero, this is\n  *   the offset into L5 where the header begins.\n  * l2_proto_off: offset to EtherType eth or vlan header.\n  * l3_proto_off: offset to next protocol field in IPv4 or 6 header.\n@@ -29,7 +29,7 @@\n struct copy_item_args {\n \tconst struct rte_flow_item *item;\n \tstruct filter_v2 *filter;\n-\tuint8_t *inner_ofst;\n+\tuint8_t *inner_offset;\n \tuint8_t l2_proto_off;\n \tuint8_t l3_proto_off;\n \tstruct enic *enic;\n@@ -405,7 +405,7 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg)\n \t\treturn ENOTSUP;\n \t}\n \n-\t/* check that the suppied mask exactly matches capabilty */\n+\t/* check that the supplied mask exactly matches capability */\n \tif (!mask_exact_match((const uint8_t *)&supported_mask,\n \t\t\t      (const uint8_t *)item->mask, sizeof(*mask))) {\n \t\tENICPMD_LOG(ERR, \"IPv4 exact match mask\");\n@@ -443,7 +443,7 @@ enic_copy_item_udp_v1(struct copy_item_args *arg)\n \t\treturn ENOTSUP;\n \t}\n \n-\t/* check that the suppied mask exactly matches capabilty */\n+\t/* check that the supplied mask exactly matches capability */\n \tif (!mask_exact_match((const uint8_t *)&supported_mask,\n \t\t\t      (const uint8_t *)item->mask, sizeof(*mask))) {\n \t\tENICPMD_LOG(ERR, \"UDP exact match mask\");\n@@ -482,7 +482,7 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg)\n \t\treturn ENOTSUP;\n \t}\n \n-\t/* check that the suppied mask exactly matches capabilty */\n+\t/* check that the supplied mask exactly matches capability */\n \tif (!mask_exact_match((const uint8_t *)&supported_mask,\n \t\t\t     (const uint8_t *)item->mask, sizeof(*mask))) {\n \t\tENICPMD_LOG(ERR, \"TCP exact match mask\");\n@@ -504,7 +504,7 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg)\n  * we set EtherType and IP proto as necessary.\n  */\n static int\n-copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,\n+copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_offset,\n \t\t  const void *val, const void *mask, uint8_t val_size,\n \t\t  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)\n {\n@@ -512,7 +512,7 @@ copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,\n \tuint8_t start_off;\n \n \t/* No space left in the L5 pattern buffer. */\n-\tstart_off = *inner_ofst;\n+\tstart_off = *inner_offset;\n \tif ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)\n \t\treturn ENOTSUP;\n \tl5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;\n@@ -537,7 +537,7 @@ copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,\n \t\t}\n \t}\n \t/* All inner headers land in L5 buffer even if their spec is null. 
*/\n-\t*inner_ofst += val_size;\n+\t*inner_offset += val_size;\n \treturn 0;\n }\n \n@@ -545,7 +545,7 @@ static int\n enic_copy_item_inner_eth_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \n \tENICPMD_FUNC_TRACE();\n \tif (!mask)\n@@ -560,7 +560,7 @@ static int\n enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \tuint8_t eth_type_off;\n \n \tENICPMD_FUNC_TRACE();\n@@ -578,7 +578,7 @@ static int\n enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \n \tENICPMD_FUNC_TRACE();\n \tif (!mask)\n@@ -594,7 +594,7 @@ static int\n enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \n \tENICPMD_FUNC_TRACE();\n \tif (!mask)\n@@ -610,7 +610,7 @@ static int\n enic_copy_item_inner_udp_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \n \tENICPMD_FUNC_TRACE();\n \tif (!mask)\n@@ -625,7 +625,7 @@ static int\n enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)\n {\n \tconst void *mask = arg->item->mask;\n-\tuint8_t *off = arg->inner_ofst;\n+\tuint8_t *off = arg->inner_offset;\n \n \tENICPMD_FUNC_TRACE();\n \tif (!mask)\n@@ -899,7 +899,7 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)\n {\n \tconst struct rte_flow_item *item = arg->item;\n \tstruct filter_v2 *enic_filter = arg->filter;\n-\tuint8_t *inner_ofst = arg->inner_ofst;\n+\tuint8_t *inner_offset = arg->inner_offset;\n \tconst struct rte_flow_item_vxlan *spec = item->spec;\n \tconst struct rte_flow_item_vxlan *mask = item->mask;\n \tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n@@ -929,7 +929,7 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)\n \tmemcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,\n \t       sizeof(struct rte_vxlan_hdr));\n \n-\t*inner_ofst = sizeof(struct rte_vxlan_hdr);\n+\t*inner_offset = sizeof(struct rte_vxlan_hdr);\n \treturn 0;\n }\n \n@@ -943,7 +943,7 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)\n {\n \tconst struct rte_flow_item *item = arg->item;\n \tstruct filter_v2 *enic_filter = arg->filter;\n-\tuint8_t *inner_ofst = arg->inner_ofst;\n+\tuint8_t *inner_offset = arg->inner_offset;\n \tconst struct rte_flow_item_raw *spec = item->spec;\n \tconst struct rte_flow_item_raw *mask = item->mask;\n \tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n@@ -951,7 +951,7 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)\n \tENICPMD_FUNC_TRACE();\n \n \t/* Cannot be used for inner packet */\n-\tif (*inner_ofst)\n+\tif (*inner_offset)\n \t\treturn EINVAL;\n \t/* Need both spec and mask */\n \tif (!spec || !mask)\n@@ -1020,13 +1020,13 @@ item_stacking_valid(enum rte_flow_item_type prev_item,\n  */\n static void\n fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,\n-\t       uint8_t inner_ofst)\n+\t       uint8_t inner_offset)\n {\n \tuint8_t layer[FILTER_GENERIC_1_KEY_LEN];\n \tuint8_t inner;\n \tuint8_t vxlan;\n \n-\tif (!(inner_ofst > 0 && enic->vxlan))\n+\tif (!(inner_offset > 0 && enic->vxlan))\n \t\treturn;\n \tENICPMD_FUNC_TRACE();\n \tvxlan = sizeof(struct rte_vxlan_hdr);\n@@ -1034,7 +1034,7 @@ 
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,\n \t       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);\n \tmemcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),\n \t       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);\n-\tinner = inner_ofst - vxlan;\n+\tinner = inner_offset - vxlan;\n \tmemset(layer, 0, sizeof(layer));\n \tmemcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);\n \tmemcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));\n@@ -1044,14 +1044,14 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,\n }\n \n /**\n- * Build the intenal enic filter structure from the provided pattern. The\n+ * Build the internal enic filter structure from the provided pattern. The\n  * pattern is validated as the items are copied.\n  *\n  * @param pattern[in]\n  * @param items_info[in]\n  *   Info about this NICs item support, like valid previous items.\n  * @param enic_filter[out]\n- *   NIC specfilc filters derived from the pattern.\n+ *   NIC specific filters derived from the pattern.\n  * @param error[out]\n  */\n static int\n@@ -1063,7 +1063,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n {\n \tint ret;\n \tconst struct rte_flow_item *item = pattern;\n-\tuint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */\n+\tuint8_t inner_offset = 0; /* If encapsulated, offset into L5 */\n \tenum rte_flow_item_type prev_item;\n \tconst struct enic_items *item_info;\n \tstruct copy_item_args args;\n@@ -1075,7 +1075,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n \tprev_item = 0;\n \n \targs.filter = enic_filter;\n-\targs.inner_ofst = &inner_ofst;\n+\targs.inner_offset = &inner_offset;\n \targs.enic = enic;\n \tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n \t\t/* Get info about how to validate and copy the item. If NULL\n@@ -1087,7 +1087,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n \t\titem_info = &cap->item_info[item->type];\n \t\tif (item->type > cap->max_item_type ||\n \t\t    item_info->copy_item == NULL ||\n-\t\t    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {\n+\t\t    (inner_offset > 0 && item_info->inner_copy_item == NULL)) {\n \t\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\tNULL, \"Unsupported item.\");\n@@ -1099,7 +1099,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n \t\t\tgoto stacking_error;\n \n \t\targs.item = item;\n-\t\tcopy_fn = inner_ofst > 0 ? item_info->inner_copy_item :\n+\t\tcopy_fn = inner_offset > 0 ? 
item_info->inner_copy_item :\n \t\t\titem_info->copy_item;\n \t\tret = copy_fn(&args);\n \t\tif (ret)\n@@ -1107,7 +1107,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n \t\tprev_item = item->type;\n \t\tis_first_item = 0;\n \t}\n-\tfixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);\n+\tfixup_l5_layer(enic, &enic_filter->u.generic_1, inner_offset);\n \n \treturn 0;\n \n@@ -1123,12 +1123,12 @@ enic_copy_filter(const struct rte_flow_item pattern[],\n }\n \n /**\n- * Build the intenal version 1 NIC action structure from the provided pattern.\n+ * Build the internal version 1 NIC action structure from the provided pattern.\n  * The pattern is validated as the items are copied.\n  *\n  * @param actions[in]\n  * @param enic_action[out]\n- *   NIC specfilc actions derived from the actions.\n+ *   NIC specific actions derived from the actions.\n  * @param error[out]\n  */\n static int\n@@ -1170,12 +1170,12 @@ enic_copy_action_v1(__rte_unused struct enic *enic,\n }\n \n /**\n- * Build the intenal version 2 NIC action structure from the provided pattern.\n+ * Build the internal version 2 NIC action structure from the provided pattern.\n  * The pattern is validated as the items are copied.\n  *\n  * @param actions[in]\n  * @param enic_action[out]\n- *   NIC specfilc actions derived from the actions.\n+ *   NIC specific actions derived from the actions.\n  * @param error[out]\n  */\n static int\ndiff --git a/drivers/net/enic/enic_fm_flow.c b/drivers/net/enic/enic_fm_flow.c\nindex ae43f36b..bef842d4 100644\n--- a/drivers/net/enic/enic_fm_flow.c\n+++ b/drivers/net/enic/enic_fm_flow.c\n@@ -721,7 +721,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)\n \t}\n \n \t/* NIC does not support GTP tunnels. No Items are allowed after this.\n-\t * This prevents the specificaiton of further items.\n+\t * This prevents the specification of further items.\n \t */\n \targ->header_level = 0;\n \n@@ -733,7 +733,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)\n \n \t/*\n \t * Use the raw L4 buffer to match GTP as fm_header_set does not have\n-\t * GTP header. UDP dst port must be specifiec. Using the raw buffer\n+\t * GTP header. UDP dst port must be specified. Using the raw buffer\n \t * does not affect such UDP item, since we skip UDP in the raw buffer.\n \t */\n \tfm_data->fk_header_select |= FKH_L4RAW;\n@@ -1846,7 +1846,7 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)\n \t/* Remove trailing comma */\n \tif (buf[0])\n \t\t*(bp - 1) = '\\0';\n-\tENICPMD_LOG(DEBUG, \"       Acions: %s\", buf);\n+\tENICPMD_LOG(DEBUG, \"       Actions: %s\", buf);\n }\n \n static int\n@@ -2364,7 +2364,7 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,\n \tif (ret < 0 && ret != -ENOENT)\n \t\treturn rte_flow_error_set(error, -ret,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t   NULL, \"enic: rte_hash_lookup(aciton)\");\n+\t\t\t\t   NULL, \"enic: rte_hash_lookup(action)\");\n \n \tif (ret == -ENOENT) {\n \t\t/* Allocate a new action on the NIC. */\n@@ -2435,7 +2435,7 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,\n \n \tENICPMD_FUNC_TRACE();\n \n-\t/* Get or create an aciton handle. 
*/\n \tret = enic_action_handle_get(fm, action_in, error, &ah);\n \tif (ret)\n \t\treturn ret;\ndiff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c\nindex 7f84b5f9..97d97ea7 100644\n--- a/drivers/net/enic/enic_main.c\n+++ b/drivers/net/enic/enic_main.c\n@@ -1137,7 +1137,7 @@ int enic_disable(struct enic *enic)\n \t}\n \n \t/* If we were using interrupts, set the interrupt vector to -1\n-\t * to disable interrupts.  We are not disabling link notifcations,\n+\t * to disable interrupts.  We are not disabling link notifications,\n \t * though, as we want the polling of link status to continue working.\n \t */\n \tif (enic->rte_dev->data->dev_conf.intr_conf.lsc)\ndiff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c\nindex c44715bf..33e96b48 100644\n--- a/drivers/net/enic/enic_rxtx.c\n+++ b/drivers/net/enic/enic_rxtx.c\n@@ -653,7 +653,7 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,\n \t\t * The app should not send oversized\n \t\t * packets. tx_pkt_prepare includes a check as\n \t\t * well. But some apps ignore the device max size and\n-\t\t * tx_pkt_prepare. Oversized packets cause WQ errrors\n+\t\t * tx_pkt_prepare. Oversized packets cause WQ errors\n \t\t * and the NIC ends up disabling the whole WQ. So\n \t\t * truncate packets..\n \t\t */\ndiff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h\nindex 7cfa29fa..17a7056c 100644\n--- a/drivers/net/fm10k/fm10k.h\n+++ b/drivers/net/fm10k/fm10k.h\n@@ -44,7 +44,7 @@\n #define FM10K_TX_MAX_MTU_SEG UINT8_MAX\n \n /*\n- * byte aligment for HW RX data buffer\n+ * byte alignment for HW RX data buffer\n  * Datasheet requires RX buffer addresses shall either be 512-byte aligned or\n  * be 8-byte aligned but without crossing host memory pages (4KB alignment\n  * boundaries). 
Satisfy first option.\ndiff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c\nindex 43e1d134..8bbd8b44 100644\n--- a/drivers/net/fm10k/fm10k_ethdev.c\n+++ b/drivers/net/fm10k/fm10k_ethdev.c\n@@ -290,7 +290,7 @@ rx_queue_free(struct fm10k_rx_queue *q)\n }\n \n /*\n- * disable RX queue, wait unitl HW finished necessary flush operation\n+ * disable RX queue, wait until HW finished necessary flush operation\n  */\n static inline int\n rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)\n@@ -379,7 +379,7 @@ tx_queue_free(struct fm10k_tx_queue *q)\n }\n \n /*\n- * disable TX queue, wait unitl HW finished necessary flush operation\n+ * disable TX queue, wait until HW finished necessary flush operation\n  */\n static inline int\n tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)\n@@ -453,7 +453,7 @@ fm10k_dev_configure(struct rte_eth_dev *dev)\n \tif (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)\n \t\tdev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;\n \n-\t/* multipe queue mode checking */\n+\t/* multiple queue mode checking */\n \tret  = fm10k_check_mq_mode(dev);\n \tif (ret != 0) {\n \t\tPMD_DRV_LOG(ERR, \"fm10k_check_mq_mode fails with %d.\",\n@@ -2553,7 +2553,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -2676,7 +2676,7 @@ fm10k_dev_interrupt_handler_pf(void *param)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -3034,7 +3034,7 @@ fm10k_params_init(struct rte_eth_dev *dev)\n \tstruct fm10k_dev_info *info =\n \t\tFM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);\n \n-\t/* Inialize bus info. Normally we would call fm10k_get_bus_info(), but\n+\t/* Initialize bus info. Normally we would call fm10k_get_bus_info(), but\n \t * there is no way to get link status without reading BAR4.  
Until this\n \t * works, assume we have maximum bandwidth.\n \t * @todo - fix bus info\ndiff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c\nindex 1269250e..10ce5a75 100644\n--- a/drivers/net/fm10k/fm10k_rxtx_vec.c\n+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c\n@@ -212,7 +212,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)\n \tstruct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;\n \n #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE\n-\t/* whithout rx ol_flags, no VP flag report */\n+\t/* without rx ol_flags, no VP flag report */\n \tif (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)\n \t\treturn -1;\n #endif\n@@ -239,7 +239,7 @@ fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)\n \tstruct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */\n \n \tmb_def.nb_segs = 1;\n-\t/* data_off will be ajusted after new mbuf allocated for 512-byte\n+\t/* data_off will be adjusted after new mbuf allocated for 512-byte\n \t * alignment.\n \t */\n \tmb_def.data_off = RTE_PKTMBUF_HEADROOM;\n@@ -410,7 +410,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tif (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))\n \t\treturn 0;\n \n-\t/* Vecotr RX will process 4 packets at a time, strip the unaligned\n+\t/* Vector RX will process 4 packets at a time, strip the unaligned\n \t * tails in case it's not multiple of 4.\n \t */\n \tnb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);\n@@ -481,7 +481,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);\n \n #if defined(RTE_ARCH_X86_64)\n-\t\t/* B.1 load 2 64 bit mbuf poitns */\n+\t\t/* B.1 load 2 64 bit mbuf points */\n \t\tmbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);\n #endif\n \n@@ -573,7 +573,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \n \t\tfm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);\n \n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll(_mm_cvtsi128_si64(staterr));\n \t\tnb_pkts_recd += var;\n \t\tif (likely(var != RTE_FM10K_DESCS_PER_LOOP))\ndiff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c\nindex 1853511c..0121b0c2 100644\n--- a/drivers/net/hinic/hinic_pmd_ethdev.c\n+++ b/drivers/net/hinic/hinic_pmd_ethdev.c\n@@ -255,7 +255,7 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);\n  * Interrupt handler triggered by NIC  for handling\n  * specific event.\n  *\n- * @param: The address of parameter (struct rte_eth_dev *) regsitered before.\n+ * @param: The address of parameter (struct rte_eth_dev *) registered before.\n  */\n static void hinic_dev_interrupt_handler(void *param)\n {\n@@ -336,7 +336,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)\n \t\treturn err;\n \t}\n \n-\t/* init vlan offoad */\n+\t/* init vlan offload */\n \terr = hinic_vlan_offload_set(dev,\n \t\t\t\tRTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);\n \tif (err) {\ndiff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h\nindex 5eca8b10..8e6251f6 100644\n--- a/drivers/net/hinic/hinic_pmd_ethdev.h\n+++ b/drivers/net/hinic/hinic_pmd_ethdev.h\n@@ -170,7 +170,7 @@ struct tag_tcam_key_mem {\n \t\t/*\n \t\t * tunnel packet, mask must be 0xff, spec value is 1;\n \t\t * normal packet, mask must be 0, spec value is 0;\n-\t\t * if tunnal packet, ucode use\n+\t\t * if tunnel packet, ucode use\n \t\t * sip/dip/protocol/src_port/dst_dport from inner 
packet\n \t\t */\n \t\tu32 tunnel_flag:8;\ndiff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c\nindex d71a42af..9620e138 100644\n--- a/drivers/net/hinic/hinic_pmd_flow.c\n+++ b/drivers/net/hinic/hinic_pmd_flow.c\n@@ -232,7 +232,7 @@ static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,\n }\n \n static int\n-hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,\n+hinic_parse_ethertype_action(const struct rte_flow_action *actions,\n \t\t\tconst struct rte_flow_action *act,\n \t\t\tconst struct rte_flow_action_queue *act_q,\n \t\t\tstruct rte_eth_ethertype_filter *filter,\n@@ -344,7 +344,7 @@ static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,\n \t\treturn -rte_errno;\n \t}\n \n-\tif (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))\n+\tif (hinic_parse_ethertype_action(actions, act, act_q, filter, error))\n \t\treturn -rte_errno;\n \n \tif (hinic_check_ethertype_attr_ele(attr, error))\n@@ -734,7 +734,7 @@ static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,\n  * END\n  * other members in mask and spec should set to 0x00.\n  * item->last should be NULL.\n- * Please aware there's an asumption for all the parsers.\n+ * Please be aware there's an assumption for all the parsers.\n  * rte_flow_item is using big endian, rte_flow_attr and\n  * rte_flow_action are using CPU order.\n  * Because the pattern is used to describe the packets,\n@@ -1630,7 +1630,7 @@ static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,\n \n /**\n  * Check if the flow rule is supported by nic.\n- * It only checkes the format. Don't guarantee the rule can be programmed into\n+ * It only checks the format. It doesn't guarantee the rule can be programmed into\n  * the HW. 
Because there can be no enough room for the rule.\n  */\n static int hinic_flow_validate(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c\nindex 7adb6e36..db63c855 100644\n--- a/drivers/net/hinic/hinic_pmd_rx.c\n+++ b/drivers/net/hinic/hinic_pmd_rx.c\n@@ -142,33 +142,33 @@\n #define HINIC_GET_SUPER_CQE_EN(pkt_info)\t\\\n \tRQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)\n \n-#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT\t\t21\n-#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK\t\t0x1U\n+#define RQ_CQE_OFFLOAD_TYPE_VLAN_EN_SHIFT\t\t21\n+#define RQ_CQE_OFFLOAD_TYPE_VLAN_EN_MASK\t\t0x1U\n \n-#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT\t\t0\n-#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK\t\t0xFFFU\n+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_SHIFT\t\t0\n+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_MASK\t\t0xFFFU\n \n-#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT\t\t19\n-#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK\t\t0x3U\n+#define RQ_CQE_OFFLOAD_TYPE_PKT_UMBCAST_SHIFT\t\t19\n+#define RQ_CQE_OFFLOAD_TYPE_PKT_UMBCAST_MASK\t\t0x3U\n \n-#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT\t\t24\n-#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK\t\t0xFFU\n+#define RQ_CQE_OFFLOAD_TYPE_RSS_TYPE_SHIFT\t\t24\n+#define RQ_CQE_OFFLOAD_TYPE_RSS_TYPE_MASK\t\t0xFFU\n \n-#define RQ_CQE_OFFOLAD_TYPE_GET(val, member)\t\t(((val) >> \\\n-\t\t\t\tRQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \\\n-\t\t\t\tRQ_CQE_OFFOLAD_TYPE_##member##_MASK)\n+#define RQ_CQE_OFFLOAD_TYPE_GET(val, member)\t\t(((val) >> \\\n+\t\t\t\tRQ_CQE_OFFLOAD_TYPE_##member##_SHIFT) & \\\n+\t\t\t\tRQ_CQE_OFFLOAD_TYPE_##member##_MASK)\n \n #define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)\t\\\n-\t\tRQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)\n+\t\tRQ_CQE_OFFLOAD_TYPE_GET(offload_type, VLAN_EN)\n \n #define HINIC_GET_RSS_TYPES(offload_type)\t\\\n-\t\tRQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)\n+\t\tRQ_CQE_OFFLOAD_TYPE_GET(offload_type, RSS_TYPE)\n \n #define HINIC_GET_RX_PKT_TYPE(offload_type)\t\\\n-\t\tRQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)\n+\t\tRQ_CQE_OFFLOAD_TYPE_GET(offload_type, PKT_TYPE)\n \n #define HINIC_GET_RX_PKT_UMBCAST(offload_type)\t\\\n-\t\tRQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)\n+\t\tRQ_CQE_OFFLOAD_TYPE_GET(offload_type, PKT_UMBCAST)\n \n #define RQ_CQE_STATUS_CSUM_BYPASS_VAL\t\t\t0x80U\n #define RQ_CQE_STATUS_CSUM_ERR_IP_MASK\t\t\t0x39U\ndiff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c\nindex 2688817f..f09b1a6e 100644\n--- a/drivers/net/hinic/hinic_pmd_tx.c\n+++ b/drivers/net/hinic/hinic_pmd_tx.c\n@@ -1144,7 +1144,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)\n \t\tmbuf_pkt = *tx_pkts++;\n \t\tqueue_info = 0;\n \n-\t\t/* 1. parse sge and tx offlod info from mbuf */\n+\t\t/* 1. 
parse sge and tx offload info from mbuf */\n \t\tif (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,\n \t\t\t\t\t\t       &sqe_info, &off_info))) {\n \t\t\ttxq->txq_stats.off_errs++;\ndiff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c\nindex 2ce59d8d..5b42d38a 100644\n--- a/drivers/net/hns3/hns3_cmd.c\n+++ b/drivers/net/hns3/hns3_cmd.c\n@@ -466,7 +466,7 @@ hns3_mask_capability(struct hns3_hw *hw,\n \tfor (i = 0; i < MAX_CAPS_BIT; i++) {\n \t\tif (!(caps_masked & BIT_ULL(i)))\n \t\t\tcontinue;\n-\t\thns3_info(hw, \"mask capabiliy: id-%u, name-%s.\",\n+\t\thns3_info(hw, \"mask capability: id-%u, name-%s.\",\n \t\t\t  i, hns3_get_caps_name(i));\n \t}\n }\n@@ -736,7 +736,7 @@ hns3_cmd_init(struct hns3_hw *hw)\n \t\treturn 0;\n \n \t/*\n-\t * Requiring firmware to enable some features, firber port can still\n+\t * Requiring firmware to enable some features, fiber port can still\n \t * work without it, but copper port can't work because the firmware\n \t * fails to take over the PHY.\n \t */\ndiff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c\nindex eac2aa10..0bb552ea 100644\n--- a/drivers/net/hns3/hns3_common.c\n+++ b/drivers/net/hns3/hns3_common.c\n@@ -603,7 +603,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)\n \thw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);\n \tfor (i = 0; i < hw->intr_tqps_num; i++) {\n \t\t/*\n-\t\t * Set gap limiter/rate limiter/quanity limiter algorithm\n+\t\t * Set gap limiter/rate limiter/quantity limiter algorithm\n \t\t * configuration for interrupt coalesce of queue's interrupt.\n \t\t */\n \t\thns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,\ndiff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c\nindex 3d0159d7..c8a1fb2c 100644\n--- a/drivers/net/hns3/hns3_dcb.c\n+++ b/drivers/net/hns3/hns3_dcb.c\n@@ -25,7 +25,7 @@\n  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)\n  *\t\tTick * (2 ^ IR_s)\n  *\n- * @return: 0: calculate sucessful, negative: fail\n+ * @return: 0: calculate successful, negative: fail\n  */\n static int\n hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,\n@@ -36,8 +36,8 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,\n #define DIVISOR_IR_B_126\t(126 * DIVISOR_CLK)\n \n \tconst uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {\n-\t\t6 * 256,    /* Prioriy level */\n-\t\t6 * 32,     /* Prioriy group level */\n+\t\t6 * 256,    /* Priority level */\n+\t\t6 * 32,     /* Priority group level */\n \t\t6 * 8,      /* Port level */\n \t\t6 * 256     /* Qset level */\n \t};\n@@ -312,30 +312,30 @@ hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)\n }\n \n static uint32_t\n-hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,\n+hns3_dcb_get_shaping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,\n \t\t\t   uint8_t bs_b, uint8_t bs_s)\n {\n-\tuint32_t shapping_para = 0;\n+\tuint32_t shaping_para = 0;\n \n-\t/* If ir_b is zero it means IR is 0Mbps, return zero of shapping_para */\n+\t/* If ir_b is zero it means IR is 0Mbps, return zero of shaping_para */\n \tif (ir_b == 0)\n-\t\treturn shapping_para;\n+\t\treturn shaping_para;\n \n-\thns3_dcb_set_field(shapping_para, IR_B, ir_b);\n-\thns3_dcb_set_field(shapping_para, IR_U, ir_u);\n-\thns3_dcb_set_field(shapping_para, IR_S, ir_s);\n-\thns3_dcb_set_field(shapping_para, BS_B, bs_b);\n-\thns3_dcb_set_field(shapping_para, BS_S, bs_s);\n+\thns3_dcb_set_field(shaping_para, IR_B, ir_b);\n+\thns3_dcb_set_field(shaping_para, IR_U, 
ir_u);\n+\thns3_dcb_set_field(shaping_para, IR_S, ir_s);\n+\thns3_dcb_set_field(shaping_para, BS_B, bs_b);\n+\thns3_dcb_set_field(shaping_para, BS_S, bs_s);\n \n-\treturn shapping_para;\n+\treturn shaping_para;\n }\n \n static int\n hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)\n {\n-\tstruct hns3_port_shapping_cmd *shap_cfg_cmd;\n+\tstruct hns3_port_shaping_cmd *shap_cfg_cmd;\n \tstruct hns3_shaper_parameter shaper_parameter;\n-\tuint32_t shapping_para;\n+\tuint32_t shaping_para;\n \tuint32_t ir_u, ir_b, ir_s;\n \tstruct hns3_cmd_desc desc;\n \tint ret;\n@@ -348,21 +348,21 @@ hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)\n \t}\n \n \thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);\n-\tshap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;\n+\tshap_cfg_cmd = (struct hns3_port_shaping_cmd *)desc.data;\n \n \tir_b = shaper_parameter.ir_b;\n \tir_u = shaper_parameter.ir_u;\n \tir_s = shaper_parameter.ir_s;\n-\tshapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\tshaping_para = hns3_dcb_get_shaping_para(ir_b, ir_u, ir_s,\n \t\t\t\t\t\t   HNS3_SHAPER_BS_U_DEF,\n \t\t\t\t\t\t   HNS3_SHAPER_BS_S_DEF);\n \n-\tshap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\tshap_cfg_cmd->port_shaping_para = rte_cpu_to_le_32(shaping_para);\n \n \t/*\n \t * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag\n-\t * field in hns3_port_shapping_cmd to require firmware to recalculate\n-\t * shapping parameters. And whether the parameters are recalculated\n+\t * field in hns3_port_shaping_cmd to require firmware to recalculate\n+\t * shaping parameters. And whether the parameters are recalculated\n \t * depends on the firmware version. But driver still needs to\n \t * calculate it and configure to firmware for better compatibility.\n \t */\n@@ -385,10 +385,10 @@ hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)\n }\n \n static int\n-hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n-\t\t\t uint8_t pg_id, uint32_t shapping_para, uint32_t rate)\n+hns3_dcb_pg_shaping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n+\t\t\t uint8_t pg_id, uint32_t shaping_para, uint32_t rate)\n {\n-\tstruct hns3_pg_shapping_cmd *shap_cfg_cmd;\n+\tstruct hns3_pg_shaping_cmd *shap_cfg_cmd;\n \tenum hns3_opcode_type opcode;\n \tstruct hns3_cmd_desc desc;\n \n@@ -396,15 +396,15 @@ hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n \t\t HNS3_OPC_TM_PG_C_SHAPPING;\n \thns3_cmd_setup_basic_desc(&desc, opcode, false);\n \n-\tshap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;\n+\tshap_cfg_cmd = (struct hns3_pg_shaping_cmd *)desc.data;\n \n \tshap_cfg_cmd->pg_id = pg_id;\n \n-\tshap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\tshap_cfg_cmd->pg_shaping_para = rte_cpu_to_le_32(shaping_para);\n \n \t/*\n \t * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in\n-\t * hns3_pg_shapping_cmd to require firmware to recalculate shapping\n+\t * hns3_pg_shaping_cmd to require firmware to recalculate shaping\n \t * parameters. And whether parameters are recalculated depends on\n \t * the firmware version. 
But driver still needs to calculate it and\n \t * configure to firmware for better compatibility.\n@@ -432,11 +432,11 @@ hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)\n \t\treturn ret;\n \t}\n \n-\tshaper_para = hns3_dcb_get_shapping_para(0, 0, 0,\n+\tshaper_para = hns3_dcb_get_shaping_para(0, 0, 0,\n \t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n \t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n \n-\tret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,\n+\tret = hns3_dcb_pg_shaping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,\n \t\t\t\t       shaper_para, rate);\n \tif (ret) {\n \t\thns3_err(hw, \"config PG CIR shaper parameter fail, ret = %d.\",\n@@ -447,11 +447,11 @@ hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)\n \tir_b = shaper_parameter.ir_b;\n \tir_u = shaper_parameter.ir_u;\n \tir_s = shaper_parameter.ir_s;\n-\tshaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\tshaper_para = hns3_dcb_get_shaping_para(ir_b, ir_u, ir_s,\n \t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n \t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n \n-\tret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,\n+\tret = hns3_dcb_pg_shaping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,\n \t\t\t\t       shaper_para, rate);\n \tif (ret) {\n \t\thns3_err(hw, \"config PG PIR shaper parameter fail, ret = %d.\",\n@@ -520,10 +520,10 @@ hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)\n }\n \n static int\n-hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n-\t\t\t  uint8_t pri_id, uint32_t shapping_para, uint32_t rate)\n+hns3_dcb_pri_shaping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n+\t\t\t  uint8_t pri_id, uint32_t shaping_para, uint32_t rate)\n {\n-\tstruct hns3_pri_shapping_cmd *shap_cfg_cmd;\n+\tstruct hns3_pri_shaping_cmd *shap_cfg_cmd;\n \tenum hns3_opcode_type opcode;\n \tstruct hns3_cmd_desc desc;\n \n@@ -532,16 +532,16 @@ hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n \n \thns3_cmd_setup_basic_desc(&desc, opcode, false);\n \n-\tshap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;\n+\tshap_cfg_cmd = (struct hns3_pri_shaping_cmd *)desc.data;\n \n \tshap_cfg_cmd->pri_id = pri_id;\n \n-\tshap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\tshap_cfg_cmd->pri_shaping_para = rte_cpu_to_le_32(shaping_para);\n \n \t/*\n \t * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag\n-\t * field in hns3_pri_shapping_cmd to require firmware to recalculate\n-\t * shapping parameters. And whether the parameters are recalculated\n+\t * field in hns3_pri_shaping_cmd to require firmware to recalculate\n+\t * shaping parameters. And whether the parameters are recalculated\n \t * depends on the firmware version. 
But driver still needs to\n \t * calculate it and configure to firmware for better compatibility.\n \t */\n@@ -567,11 +567,11 @@ hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)\n \t\treturn ret;\n \t}\n \n-\tshaper_para = hns3_dcb_get_shapping_para(0, 0, 0,\n+\tshaper_para = hns3_dcb_get_shaping_para(0, 0, 0,\n \t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n \t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n \n-\tret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,\n+\tret = hns3_dcb_pri_shaping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,\n \t\t\t\t\tshaper_para, rate);\n \tif (ret) {\n \t\thns3_err(hw,\n@@ -583,11 +583,11 @@ hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)\n \tir_b = shaper_parameter.ir_b;\n \tir_u = shaper_parameter.ir_u;\n \tir_s = shaper_parameter.ir_s;\n-\tshaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\tshaper_para = hns3_dcb_get_shaping_para(ir_b, ir_u, ir_s,\n \t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n \t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n \n-\tret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,\n+\tret = hns3_dcb_pri_shaping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,\n \t\t\t\t\tshaper_para, rate);\n \tif (ret) {\n \t\thns3_err(hw,\n@@ -1532,7 +1532,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)\n \n \tret = hns3_dcb_schd_setup_hw(hw);\n \tif (ret) {\n-\t\thns3_err(hw, \"dcb schdule configure failed! ret = %d\", ret);\n+\t\thns3_err(hw, \"dcb schedule configure failed! ret = %d\", ret);\n \t\treturn ret;\n \t}\n \n@@ -1737,7 +1737,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)\n  * hns3_dcb_pfc_enable - Enable priority flow control\n  * @dev: pointer to ethernet device\n  *\n- * Configures the pfc settings for one porority.\n+ * Configures the pfc settings for one priority.\n  */\n int\n hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)\ndiff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h\nindex e06ec177..b3b990f0 100644\n--- a/drivers/net/hns3/hns3_dcb.h\n+++ b/drivers/net/hns3/hns3_dcb.h\n@@ -86,41 +86,41 @@ struct hns3_nq_to_qs_link_cmd {\n #define HNS3_DCB_SHAP_BS_S_LSH\t21\n \n /*\n- * For more flexible selection of shapping algorithm in different network\n- * engine, the algorithm calculating shapping parameter is moved to firmware to\n- * execute. Bit HNS3_TM_RATE_VLD_B of flag field in hns3_pri_shapping_cmd,\n- * hns3_pg_shapping_cmd or hns3_port_shapping_cmd is set to 1 to require\n- * firmware to recalculate shapping parameters. However, whether the parameters\n+ * For more flexible selection of shaping algorithm in different network\n+ * engine, the algorithm calculating shaping parameter is moved to firmware to\n+ * execute. Bit HNS3_TM_RATE_VLD_B of flag field in hns3_pri_shaping_cmd,\n+ * hns3_pg_shaping_cmd or hns3_port_shaping_cmd is set to 1 to require\n+ * firmware to recalculate shaping parameters. However, whether the parameters\n  * are recalculated depends on the firmware version. If firmware doesn't support\n- * the calculation of shapping parameters, such as on network engine with\n+ * the calculation of shaping parameters, such as on network engine with\n  * revision id 0x21, the value driver calculated will be used to configure to\n  * hardware. 
On the contrary, firmware ignores configuration of driver\n  * and recalculates the parameter.\n  */\n #define HNS3_TM_RATE_VLD_B\t0\n \n-struct hns3_pri_shapping_cmd {\n+struct hns3_pri_shaping_cmd {\n \tuint8_t pri_id;\n \tuint8_t rsvd[3];\n-\tuint32_t pri_shapping_para;\n+\tuint32_t pri_shaping_para;\n \tuint8_t flag;\n \tuint8_t rsvd1[3];\n \tuint32_t pri_rate;  /* Unit Mbps */\n \tuint8_t rsvd2[8];\n };\n \n-struct hns3_pg_shapping_cmd {\n+struct hns3_pg_shaping_cmd {\n \tuint8_t pg_id;\n \tuint8_t rsvd[3];\n-\tuint32_t pg_shapping_para;\n+\tuint32_t pg_shaping_para;\n \tuint8_t flag;\n \tuint8_t rsvd1[3];\n \tuint32_t pg_rate; /* Unit Mbps */\n \tuint8_t rsvd2[8];\n };\n \n-struct hns3_port_shapping_cmd {\n-\tuint32_t port_shapping_para;\n+struct hns3_port_shaping_cmd {\n+\tuint32_t port_shaping_para;\n \tuint8_t flag;\n \tuint8_t rsvd[3];\n \tuint32_t port_rate;   /* Unit Mbps */\ndiff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c\nindex 0bd12907..fee9c2a0 100644\n--- a/drivers/net/hns3/hns3_ethdev.c\n+++ b/drivers/net/hns3/hns3_ethdev.c\n@@ -386,7 +386,7 @@ hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)\n \n static void\n hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,\n-\t\t\tbool writen_to_tbl)\n+\t\t\tbool written_to_tbl)\n {\n \tstruct hns3_user_vlan_table *vlan_entry;\n \tstruct hns3_hw *hw = &hns->hw;\n@@ -403,7 +403,7 @@ hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,\n \t\treturn;\n \t}\n \n-\tvlan_entry->hd_tbl_status = writen_to_tbl;\n+\tvlan_entry->hd_tbl_status = written_to_tbl;\n \tvlan_entry->vlan_id = vlan_id;\n \n \tLIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);\n@@ -438,7 +438,7 @@ static int\n hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n-\tbool writen_to_tbl = false;\n+\tbool written_to_tbl = false;\n \tint ret = 0;\n \n \t/*\n@@ -458,12 +458,12 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)\n \t */\n \tif (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {\n \t\tret = hns3_set_port_vlan_filter(hns, vlan_id, on);\n-\t\twriten_to_tbl = true;\n+\t\twritten_to_tbl = true;\n \t}\n \n \tif (ret == 0) {\n \t\tif (on)\n-\t\t\thns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);\n+\t\t\thns3_add_dev_vlan_table(hns, vlan_id, written_to_tbl);\n \t\telse\n \t\t\thns3_rm_dev_vlan_table(hns, vlan_id);\n \t}\n@@ -574,7 +574,7 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,\n \thns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,\n \t\t     vcfg->vlan2_vlan_prionly ? 1 : 0);\n \n-\t/* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */\n+\t/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */\n \thns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,\n \t\t     vcfg->strip_tag1_discard_en ? 1 : 0);\n \thns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,\n@@ -784,7 +784,7 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,\n \t\t     vcfg->insert_tag2_en ? 1 : 0);\n \thns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);\n \n-\t/* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */\n+\t/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */\n \thns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,\n \t\t     vcfg->tag_shift_mode_en ? 
1 : 0);\n \n@@ -2177,7 +2177,7 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)\n }\n \n static uint32_t\n-hns3_get_firber_port_speed_capa(uint32_t supported_speed)\n+hns3_get_fiber_port_speed_capa(uint32_t supported_speed)\n {\n \tuint32_t speed_capa = 0;\n \n@@ -2210,7 +2210,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)\n \t\t\thns3_get_copper_port_speed_capa(mac->supported_speed);\n \telse\n \t\tspeed_capa =\n-\t\t\thns3_get_firber_port_speed_capa(mac->supported_speed);\n+\t\t\thns3_get_fiber_port_speed_capa(mac->supported_speed);\n \n \tif (mac->support_autoneg == 0)\n \t\tspeed_capa |= RTE_ETH_LINK_SPEED_FIXED;\n@@ -3420,7 +3420,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw,\n  * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs\n  * @hw: pointer to struct hns3_hw\n  * @buf_alloc: pointer to buffer calculation data\n- * @return: 0: calculate sucessful, negative: fail\n+ * @return: 0: calculate successful, negative: fail\n  */\n static int\n hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n@@ -4524,7 +4524,7 @@ hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)\n }\n \n static uint32_t\n-hns3_set_firber_default_support_speed(struct hns3_hw *hw)\n+hns3_set_fiber_default_support_speed(struct hns3_hw *hw)\n {\n \tstruct hns3_mac *mac = &hw->mac;\n \n@@ -4550,14 +4550,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)\n }\n \n /*\n- * Validity of supported_speed for firber and copper media type can be\n+ * Validity of supported_speed for fiber and copper media type can be\n  * guaranteed by the following policy:\n  * Copper:\n  *       Although the initialization of the phy in the firmware may not be\n  *       completed, the firmware can guarantees that the supported_speed is\n  *       an valid value.\n  * Firber:\n- *       If the version of firmware supports the acitive query way of the\n+ *       If the version of firmware supports the active query way of the\n  *       HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained\n  *       through it. If unsupported, use the SFP's speed as the value of the\n  *       supported_speed.\n@@ -4582,7 +4582,7 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)\n \t\t */\n \t\tif (mac->supported_speed == 0)\n \t\t\tmac->supported_speed =\n-\t\t\t\thns3_set_firber_default_support_speed(hw);\n+\t\t\t\thns3_set_fiber_default_support_speed(hw);\n \t}\n \n \treturn 0;\n@@ -5327,7 +5327,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw)\n \n \t/*\n \t * Flow control auto-negotiation is not supported for fiber and\n-\t * backpalne media type.\n+\t * backplane media type.\n \t */\n \tcase HNS3_MEDIA_TYPE_FIBER:\n \tcase HNS3_MEDIA_TYPE_BACKPLANE:\n@@ -6191,7 +6191,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)\n \t}\n \n \t/*\n-\t * FEC mode order defined in hns3 hardware is inconsistend with\n+\t * FEC mode order defined in hns3 hardware is inconsistent with\n \t * that defined in the ethdev library. 
So the sequence needs\n \t * to be converted.\n \t */\ndiff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h\nindex aa45b312..153e6733 100644\n--- a/drivers/net/hns3/hns3_ethdev.h\n+++ b/drivers/net/hns3/hns3_ethdev.h\n@@ -126,7 +126,7 @@ struct hns3_tc_info {\n \tuint8_t tc_sch_mode;  /* 0: sp; 1: dwrr */\n \tuint8_t pgid;\n \tuint32_t bw_limit;\n-\tuint8_t up_to_tc_map; /* user priority maping on the TC */\n+\tuint8_t up_to_tc_map; /* user priority mapping on the TC */\n };\n \n struct hns3_dcb_info {\n@@ -571,12 +571,12 @@ struct hns3_hw {\n \t/*\n \t * vlan mode.\n \t * value range:\n-\t *      HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE\n+\t *      HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE\n \t *\n \t *  - HNS3_SW_SHIFT_AND_DISCARD_MODE\n \t *     For some versions of hardware network engine, because of the\n \t *     hardware limitation, PMD needs to detect the PVID status\n-\t *     to work with haredware to implement PVID-related functions.\n+\t *     to work with hardware to implement PVID-related functions.\n \t *     For example, driver need discard the stripped PVID tag to ensure\n \t *     the PVID will not report to mbuf and shift the inserted VLAN tag\n \t *     to avoid port based VLAN covering it.\n@@ -724,7 +724,7 @@ enum hns3_mp_req_type {\n \tHNS3_MP_REQ_MAX\n };\n \n-/* Pameters for IPC. */\n+/* Parameters for IPC. */\n struct hns3_mp_param {\n \tenum hns3_mp_req_type type;\n \tint port_id;\ndiff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c\nindex 805abd45..5015fe0d 100644\n--- a/drivers/net/hns3/hns3_ethdev_vf.c\n+++ b/drivers/net/hns3/hns3_ethdev_vf.c\n@@ -242,7 +242,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,\n \t\tif (ret == -EPERM) {\n \t\t\thns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,\n \t\t\t\t\t      old_addr);\n-\t\t\thns3_warn(hw, \"Has permanet mac addr(%s) for vf\",\n+\t\t\thns3_warn(hw, \"Has permanent mac addr(%s) for vf\",\n \t\t\t\t  mac_str);\n \t\t} else {\n \t\t\thns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,\n@@ -318,7 +318,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,\n \t * 1. The promiscuous/allmulticast mode can be configured successfully\n \t *    only based on the trusted VF device. If based on the non trusted\n \t *    VF device, configuring promiscuous/allmulticast mode will fail.\n-\t *    The hns3 VF device can be confiruged as trusted device by hns3 PF\n+\t *    The hns3 VF device can be configured as trusted device by hns3 PF\n \t *    kernel ethdev driver on the host by the following command:\n \t *      \"ip link set <eth num> vf <vf id> turst on\"\n \t * 2. After the promiscuous mode is configured successfully, hns3 VF PMD\n@@ -330,7 +330,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,\n \t *    filter is still effective even in promiscuous mode. 
If upper\n \t *    applications don't call rte_eth_dev_vlan_filter API function to\n \t *    set vlan based on VF device, hns3 VF PMD will can't receive\n-\t *    the packets with vlan tag in promiscuoue mode.\n+\t *    the packets with vlan tag in promiscuous mode.\n \t */\n \thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);\n \treq->msg[0] = HNS3_MBX_SET_PROMISC_MODE;\ndiff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c\nindex d043f578..870bde4d 100644\n--- a/drivers/net/hns3/hns3_fdir.c\n+++ b/drivers/net/hns3/hns3_fdir.c\n@@ -67,7 +67,7 @@ enum HNS3_FD_KEY_TYPE {\n \n enum HNS3_FD_META_DATA {\n \tPACKET_TYPE_ID,\n-\tIP_FRAGEMENT,\n+\tIP_FRAGMENT,\n \tROCE_TYPE,\n \tNEXT_KEY,\n \tVLAN_NUMBER,\n@@ -84,7 +84,7 @@ struct key_info {\n \n static const struct key_info meta_data_key_info[] = {\n \t{PACKET_TYPE_ID, 6},\n-\t{IP_FRAGEMENT, 1},\n+\t{IP_FRAGMENT, 1},\n \t{ROCE_TYPE, 1},\n \t{NEXT_KEY, 5},\n \t{VLAN_NUMBER, 2},\ndiff --git a/drivers/net/hns3/hns3_fdir.h b/drivers/net/hns3/hns3_fdir.h\nindex f9efff3b..07b39339 100644\n--- a/drivers/net/hns3/hns3_fdir.h\n+++ b/drivers/net/hns3/hns3_fdir.h\n@@ -139,7 +139,7 @@ struct hns3_fdir_rule {\n \tuint32_t flags;\n \tuint32_t fd_id; /* APP marked unique value for this rule. */\n \tuint8_t action;\n-\t/* VF id, avaiblable when flags with HNS3_RULE_FLAG_VF_ID. */\n+\t/* VF id, available when flags with HNS3_RULE_FLAG_VF_ID. */\n \tuint8_t vf_id;\n \t/*\n \t * equal 0 when action is drop.\ndiff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c\nindex 9f2f9cb6..0dbc3f65 100644\n--- a/drivers/net/hns3/hns3_flow.c\n+++ b/drivers/net/hns3/hns3_flow.c\n@@ -338,7 +338,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,\n  *\n  * @param actions[in]\n  * @param rule[out]\n- *   NIC specfilc actions derived from the actions.\n+ *   NIC specific actions derived from the actions.\n  * @param error[out]\n  */\n static int\n@@ -369,7 +369,7 @@ hns3_handle_actions(struct rte_eth_dev *dev,\n \t\t * Queue region is implemented by FDIR + RSS in hns3 hardware,\n \t\t * the FDIR's action is one queue region (start_queue_id and\n \t\t * queue_num), then RSS spread packets to the queue region by\n-\t\t * RSS algorigthm.\n+\t\t * RSS algorithm.\n \t\t */\n \t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\t\tret = hns3_handle_action_queue_region(dev, actions,\n@@ -940,7 +940,7 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,\n \tif (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,\n-\t\t\t\t\t  \"Ver/protocal is not supported in NVGRE\");\n+\t\t\t\t\t  \"Ver/protocol is not supported in NVGRE\");\n \n \t/* TNI must be totally masked or not. */\n \tif (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&\n@@ -985,7 +985,7 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,\n \tif (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,\n-\t\t\t\t\t  \"Ver/protocal is not supported in GENEVE\");\n+\t\t\t\t\t  \"Ver/protocol is not supported in GENEVE\");\n \t/* VNI must be totally masked or not. 
*/\n \tif (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&\n \t    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))\n@@ -1309,7 +1309,7 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,\n }\n \n /*\n- * This function is used to parse rss action validatation.\n+ * This function is used to parse and validate the rss action.\n  */\n static int\n hns3_parse_rss_filter(struct rte_eth_dev *dev,\n@@ -1682,7 +1682,7 @@ hns3_flow_args_check(const struct rte_flow_attr *attr,\n \n /*\n  * Check if the flow rule is supported by hns3.\n- * It only checkes the format. Don't guarantee the rule can be programmed into\n+ * It only checks the format. It doesn't guarantee the rule can be programmed into\n  * the HW. Because there can be no enough room for the rule.\n  */\n static int\ndiff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c\nindex b3563d46..02028dcd 100644\n--- a/drivers/net/hns3/hns3_mbx.c\n+++ b/drivers/net/hns3/hns3_mbx.c\n@@ -78,14 +78,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,\n \tmbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;\n \twhile (wait_time < mbx_time_limit) {\n \t\tif (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {\n-\t\t\thns3_err(hw, \"Don't wait for mbx respone because of \"\n+\t\t\thns3_err(hw, \"Don't wait for mbx response because of \"\n \t\t\t\t \"disable_cmd\");\n \t\t\treturn -EBUSY;\n \t\t}\n \n \t\tif (is_reset_pending(hns)) {\n \t\t\thw->mbx_resp.req_msg_data = 0;\n-\t\t\thns3_err(hw, \"Don't wait for mbx respone because of \"\n+\t\t\thns3_err(hw, \"Don't wait for mbx response because of \"\n \t\t\t\t \"reset pending\");\n \t\t\treturn -EIO;\n \t\t}\ndiff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h\nindex d637bd2b..0172a2e2 100644\n--- a/drivers/net/hns3/hns3_mbx.h\n+++ b/drivers/net/hns3/hns3_mbx.h\n@@ -22,7 +22,7 @@ enum HNS3_MBX_OPCODE {\n \tHNS3_MBX_GET_RETA,              /* (VF -> PF) get RETA */\n \tHNS3_MBX_GET_RSS_KEY,           /* (VF -> PF) get RSS key */\n \tHNS3_MBX_GET_MAC_ADDR,          /* (VF -> PF) get MAC addr */\n-\tHNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate respone to VF */\n+\tHNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate response to VF */\n \tHNS3_MBX_GET_BDNUM,             /* (VF -> PF) get BD num */\n \tHNS3_MBX_GET_BUFSIZE,           /* (VF -> PF) get buffer size */\n \tHNS3_MBX_GET_STREAMID,          /* (VF -> PF) get stream id */\ndiff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h\nindex 6f153a1b..c4121207 100644\n--- a/drivers/net/hns3/hns3_rss.h\n+++ b/drivers/net/hns3/hns3_rss.h\n@@ -41,7 +41,7 @@ struct hns3_rss_tuple_cfg {\n struct hns3_rss_conf {\n \t/* RSS parameters :algorithm, flow_types,  key, queue */\n \tstruct rte_flow_action_rss conf;\n-\tuint8_t hash_algo; /* hash function type definited by hardware */\n+\tuint8_t hash_algo; /* hash function type defined by hardware */\n \tuint8_t key[HNS3_RSS_KEY_SIZE];  /* Hash key */\n \tstruct hns3_rss_tuple_cfg rss_tuple_sets;\n \tuint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];\ndiff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c\nindex f365daad..d240e36e 100644\n--- a/drivers/net/hns3/hns3_rxtx.c\n+++ b/drivers/net/hns3/hns3_rxtx.c\n@@ -1903,7 +1903,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t * For hns3 VF device, whether it needs to process PVID depends\n \t * on the configuration of PF kernel mode netdevice driver. 
And the\n \t * related PF configuration is delivered through the mailbox and finally\n-\t * reflectd in port_base_vlan_cfg.\n+\t * reflected in port_base_vlan_cfg.\n \t */\n \tif (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)\n \t\trxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==\n@@ -3043,7 +3043,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t * For hns3 VF device, whether it needs to process PVID depends\n \t * on the configuration of PF kernel mode netdev driver. And the\n \t * related PF configuration is delivered through the mailbox and finally\n-\t * reflectd in port_base_vlan_cfg.\n+\t * reflected in port_base_vlan_cfg.\n \t */\n \tif (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)\n \t\ttxq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==\n@@ -3208,7 +3208,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,\n \t * in Tx direction based on hns3 network engine. So when the number of\n \t * VLANs in the packets represented by rxm plus the number of VLAN\n \t * offload by hardware such as PVID etc, exceeds two, the packets will\n-\t * be discarded or the original VLAN of the packets will be overwitted\n+\t * be discarded or the original VLAN of the packets will be overwritten\n \t * by hardware. When the PF PVID is enabled by calling the API function\n \t * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3\n \t * PF kernel ether driver, the outer VLAN tag will always be the PVID.\n@@ -3393,7 +3393,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,\n \t\t/*\n \t\t * The inner l2 length of mbuf is the sum of outer l4 length,\n \t\t * tunneling header length and inner l2 length for a tunnel\n-\t\t * packect. But in hns3 tx descriptor, the tunneling header\n+\t\t * packet. But in hns3 tx descriptor, the tunneling header\n \t\t * length is contained in the field of outer L4 length.\n \t\t * Therefore, driver need to calculate the outer L4 length and\n \t\t * inner L2 length.\n@@ -3409,7 +3409,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,\n \t\ttmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,\n \t\t\t\t\tHNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);\n \t\t/*\n-\t\t * For NVGRE tunnel packect, the outer L4 is empty. So only\n+\t\t * For NVGRE tunnel packet, the outer L4 is empty. So only\n \t\t * fill the NVGRE header length to the outer L4 field.\n \t\t */\n \t\ttmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,\n@@ -3452,7 +3452,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,\n \t * mbuf, but for hns3 descriptor, it is contained in the outer L4. So,\n \t * there is a need that switching between them. 
To avoid multiple\n \t * calculations, the length of the L2 header include the outer and\n-\t * inner, will be filled during the parsing of tunnel packects.\n+\t * inner, will be filled during the parsing of tunnel packets.\n \t */\n \tif (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {\n \t\t/*\n@@ -3632,7 +3632,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,\n \tif (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {\n \t\tstruct rte_udp_hdr *udp_hdr;\n \t\t/*\n-\t\t * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo\n+\t\t * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo\n \t\t * header for TSO packets\n \t\t */\n \t\tif (ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n@@ -3657,7 +3657,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,\n \tif (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {\n \t\tstruct rte_udp_hdr *udp_hdr;\n \t\t/*\n-\t\t * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo\n+\t\t * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo\n \t\t * header for TSO packets\n \t\t */\n \t\tif (ol_flags & RTE_MBUF_F_TX_TCP_SEG)\ndiff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h\nindex 5423568c..e202eb9c 100644\n--- a/drivers/net/hns3/hns3_rxtx.h\n+++ b/drivers/net/hns3/hns3_rxtx.h\n@@ -611,7 +611,7 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,\n \n \t/*\n \t * If packet len bigger than mtu when recv with no-scattered algorithm,\n-\t * the first n bd will without FE bit, we need process this sisution.\n+\t * the first n bd will without FE bit, we need process this situation.\n \t * Note: we don't need add statistic counter because latest BD which\n \t *       with FE bit will mark HNS3_RXD_L2E_B bit.\n \t */\ndiff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c\nindex 0fe853d6..606b7250 100644\n--- a/drivers/net/hns3/hns3_stats.c\n+++ b/drivers/net/hns3/hns3_stats.c\n@@ -630,7 +630,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)\n \n \t\tcnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);\n \t\t/*\n-\t\t * Read hardware and software in adjacent positions to minumize\n+\t\t * Read hardware and software in adjacent positions to minimize\n \t\t * the timing variance.\n \t\t */\n \t\trte_stats->ierrors += rxq->err_stats.l2_errors +\n@@ -1289,7 +1289,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,\n  *   A pointer to an ids array passed by application. This tells which\n  *   statistics values function should retrieve. This parameter\n  *   can be set to NULL if size is 0. 
In this case function will retrieve\n- *   all avalible statistics.\n+ *   all available statistics.\n  * @param values\n  *   A pointer to a table to be filled with device statistics values.\n  * @param size\ndiff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c\nindex e1089b6b..4fc00cbc 100644\n--- a/drivers/net/hns3/hns3_tm.c\n+++ b/drivers/net/hns3/hns3_tm.c\n@@ -739,7 +739,7 @@ hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,\n }\n \n static void\n-hns3_tm_nonleaf_level_capsbilities_get(struct rte_eth_dev *dev,\n+hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,\n \t\t\t\t       uint32_t level_id,\n \t\t\t\t       struct rte_tm_level_capabilities *cap)\n {\n@@ -818,7 +818,7 @@ hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,\n \tmemset(cap, 0, sizeof(struct rte_tm_level_capabilities));\n \n \tif (level_id != HNS3_TM_NODE_LEVEL_QUEUE)\n-\t\thns3_tm_nonleaf_level_capsbilities_get(dev, level_id, cap);\n+\t\thns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);\n \telse\n \t\thns3_tm_leaf_level_capabilities_get(dev, cap);\n \ndiff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c\nindex c0bfff43..1d417dbf 100644\n--- a/drivers/net/i40e/i40e_ethdev.c\n+++ b/drivers/net/i40e/i40e_ethdev.c\n@@ -2483,7 +2483,7 @@ i40e_dev_start(struct rte_eth_dev *dev)\n \t\tif (ret != I40E_SUCCESS)\n \t\t\tPMD_DRV_LOG(WARNING, \"Fail to set phy mask\");\n \n-\t\t/* Call get_link_info aq commond to enable/disable LSE */\n+\t\t/* Call get_link_info aq command to enable/disable LSE */\n \t\ti40e_dev_link_update(dev, 0);\n \t}\n \n@@ -3555,7 +3555,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from i40e_hw_port struct */\n+\t/* Get individual stats from i40e_hw_port struct */\n \tfor (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {\n \t\tstrlcpy(xstats_names[count].name,\n \t\t\trte_i40e_hw_port_strings[i].name,\n@@ -3613,7 +3613,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from i40e_hw_port struct */\n+\t/* Get individual stats from i40e_hw_port struct */\n \tfor (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {\n \t\txstats[count].value = *(uint64_t *)(((char *)hw_stats) +\n \t\t\trte_i40e_hw_port_strings[i].offset);\n@@ -5544,7 +5544,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)\n \t\t\t\t\t&ets_sla_config, NULL);\n \tif (ret != I40E_SUCCESS) {\n \t\tPMD_DRV_LOG(ERR,\n-\t\t\t\"VSI failed to get TC bandwdith configuration %u\",\n+\t\t\t\"VSI failed to get TC bandwidth configuration %u\",\n \t\t\thw->aq.asq_last_status);\n \t\treturn ret;\n \t}\n@@ -6822,7 +6822,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -9719,7 +9719,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,\n \treturn 0;\n }\n \n-/* Check if there exists the ehtertype filter */\n+/* Check if the ethertype filter exists */\n struct i40e_ethertype_filter *\n i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,\n \t\t\t\tconst struct i40e_ethertype_filter_input *input)\ndiff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h\nindex 2d182f80..a1ebdc09 100644\n--- 
a/drivers/net/i40e/i40e_ethdev.h\n+++ b/drivers/net/i40e/i40e_ethdev.h\n@@ -897,7 +897,7 @@ struct i40e_tunnel_filter {\n \tTAILQ_ENTRY(i40e_tunnel_filter) rules;\n \tstruct i40e_tunnel_filter_input input;\n \tuint8_t is_to_vf; /* 0 - to PF, 1 - to VF */\n-\tuint16_t vf_id;   /* VF id, avaiblable when is_to_vf is 1. */\n+\tuint16_t vf_id;   /* VF id, available when is_to_vf is 1. */\n \tuint16_t queue; /* Queue assigned to when match */\n };\n \n@@ -966,7 +966,7 @@ struct i40e_tunnel_filter_conf {\n \tuint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */\n \tuint16_t queue_id;      /**< Queue assigned to if match. */\n \tuint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */\n-\tuint16_t vf_id;         /**< VF id, avaiblable when is_to_vf is 1. */\n+\tuint16_t vf_id;         /**< VF id, available when is_to_vf is 1. */\n };\n \n TAILQ_HEAD(i40e_flow_list, rte_flow);\n@@ -1100,7 +1100,7 @@ struct i40e_vf_msg_cfg {\n \t/*\n \t * If message statistics from a VF exceed the maximal limitation,\n \t * the PF will ignore any new message from that VF for\n-\t * 'ignor_second' time.\n+\t * 'ignore_second' time.\n \t */\n \tuint32_t ignore_second;\n };\n@@ -1257,7 +1257,7 @@ struct i40e_adapter {\n };\n \n /**\n- * Strucute to store private data for each VF representor instance\n+ * Structure to store private data for each VF representor instance\n  */\n struct i40e_vf_representor {\n \tuint16_t switch_domain_id;\n@@ -1265,7 +1265,7 @@ struct i40e_vf_representor {\n \tuint16_t vf_id;\n \t/**< Virtual Function ID */\n \tstruct i40e_adapter *adapter;\n-\t/**< Private data store of assocaiated physical function */\n+\t/**< Private data store of associated physical function */\n \tstruct i40e_eth_stats stats_offset;\n \t/**< Zero-point of VF statistics*/\n };\ndiff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c\nindex df2a5aae..8caedea1 100644\n--- a/drivers/net/i40e/i40e_fdir.c\n+++ b/drivers/net/i40e/i40e_fdir.c\n@@ -142,7 +142,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)\n \t\tI40E_QRX_TAIL(rxq->vsi->base_queue);\n \n \trte_wmb();\n-\t/* Init the RX tail regieter. */\n+\t/* Init the RX tail register. 
*/\n \tI40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n \n \treturn err;\n@@ -430,7 +430,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)\n \n \tfor (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {\n \t\tif (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {\n-\t\t\tPMD_DRV_LOG(ERR, \"exceeds maxmial payload limit.\");\n+\t\t\tPMD_DRV_LOG(ERR, \"exceeds maximal payload limit.\");\n \t\t\treturn -EINVAL;\n \t\t}\n \t}\n@@ -438,7 +438,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)\n \tmemset(flex_pit, 0, sizeof(flex_pit));\n \tnum = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);\n \tif (num > I40E_MAX_FLXPLD_FIED) {\n-\t\tPMD_DRV_LOG(ERR, \"exceeds maxmial number of flex fields.\");\n+\t\tPMD_DRV_LOG(ERR, \"exceeds maximal number of flex fields.\");\n \t\treturn -EINVAL;\n \t}\n \tfor (i = 0; i < num; i++) {\n@@ -948,7 +948,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,\n \tuint8_t pctype = fdir_input->pctype;\n \tstruct i40e_customized_pctype *cus_pctype;\n \n-\t/* raw pcket template - just copy contents of the raw packet */\n+\t/* raw packet template - just copy contents of the raw packet */\n \tif (fdir_input->flow_ext.pkt_template) {\n \t\tmemcpy(raw_pkt, fdir_input->flow.raw_flow.packet,\n \t\t       fdir_input->flow.raw_flow.length);\n@@ -1831,7 +1831,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,\n \t\t\t\t&check_filter.fdir.input);\n \t\tif (!node) {\n \t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t    \"There's no corresponding flow firector filter!\");\n+\t\t\t\t    \"There's no corresponding flow director filter!\");\n \t\t\treturn -EINVAL;\n \t\t}\n \ndiff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c\nindex c9676caa..e0cf9962 100644\n--- a/drivers/net/i40e/i40e_flow.c\n+++ b/drivers/net/i40e/i40e_flow.c\n@@ -3043,7 +3043,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,\n \t\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t   item,\n-\t\t\t\t\t   \"Exceeds maxmial payload limit.\");\n+\t\t\t\t\t   \"Exceeds maximal payload limit.\");\n \t\t\t\treturn -rte_errno;\n \t\t\t}\n \ndiff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c\nindex ccb3924a..2435a8a0 100644\n--- a/drivers/net/i40e/i40e_pf.c\n+++ b/drivers/net/i40e/i40e_pf.c\n@@ -343,7 +343,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,\n \t\tvf->request_caps = *(uint32_t *)msg;\n \n \t/* enable all RSS by default,\n-\t * doesn't support hena setting by virtchnnl yet.\n+\t * doesn't support hena setting by virtchnl yet.\n \t */\n \tif (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {\n \t\tI40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),\n@@ -725,7 +725,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,\n \t\tif ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {\n \t\t\ti40e_pf_config_irq_link_list(vf, map);\n \t\t} else {\n-\t\t\t/* configured queue size excceed limit */\n+\t\t\t/* configured queue size exceeds the limit */\n \t\t\tret = I40E_ERR_PARAM;\n \t\t\tgoto send_msg;\n \t\t}\ndiff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c\nindex e4cb33dc..9a00a9b7 100644\n--- a/drivers/net/i40e/i40e_rxtx.c\n+++ b/drivers/net/i40e/i40e_rxtx.c\n@@ -609,7 +609,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)\n \t\trxdp[i].read.pkt_addr = dma_addr;\n \t}\n \n-\t/* Update rx tail regsiter */\n+\t/* Update rx tail register */\n \tI40E_PCI_REG_WRITE(rxq->qrx_tail, 
rxq->rx_free_trigger);\n \n \trxq->rx_free_trigger =\n@@ -995,7 +995,7 @@ i40e_recv_scattered_pkts(void *rx_queue,\n \t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n \t * register. Update the RDT with the value of the last processed RX\n \t * descriptor minus 1, to guarantee that the RDT register is never\n-\t * equal to the RDH register, which creates a \"full\" ring situtation\n+\t * equal to the RDH register, which creates a \"full\" ring situation\n \t * from the hardware point of view.\n \t */\n \tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n@@ -1467,7 +1467,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,\n \ti40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));\n \ttxq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));\n \n-\t/* Determin if RS bit needs to be set */\n+\t/* Determine if RS bit needs to be set */\n \tif (txq->tx_tail > txq->tx_next_rs) {\n \t\ttxr[txq->tx_next_rs].cmd_type_offset_bsz |=\n \t\t\trte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<\n@@ -1697,7 +1697,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t}\n \n \tif (rxq->rx_deferred_start)\n-\t\tPMD_DRV_LOG(WARNING, \"RX queue %u is deferrd start\",\n+\t\tPMD_DRV_LOG(WARNING, \"RX queue %u is deferred start\",\n \t\t\t    rx_queue_id);\n \n \terr = i40e_alloc_rx_queue_mbufs(rxq);\n@@ -1706,7 +1706,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\treturn err;\n \t}\n \n-\t/* Init the RX tail regieter. */\n+\t/* Init the RX tail register. */\n \tI40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n \n \terr = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);\n@@ -1771,7 +1771,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \t}\n \n \tif (txq->tx_deferred_start)\n-\t\tPMD_DRV_LOG(WARNING, \"TX queue %u is deferrd start\",\n+\t\tPMD_DRV_LOG(WARNING, \"TX queue %u is deferred start\",\n \t\t\t    tx_queue_id);\n \n \t/*\n@@ -1930,7 +1930,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,\n \t\tPMD_DRV_LOG(ERR, \"Can't use default burst.\");\n \t\treturn -EINVAL;\n \t}\n-\t/* check scatterred conflict */\n+\t/* check scattered conflict */\n \tif (!dev->data->scattered_rx && use_scattered_rx) {\n \t\tPMD_DRV_LOG(ERR, \"Scattered rx is required.\");\n \t\treturn -EINVAL;\n@@ -2014,7 +2014,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n \trxq->offloads = offloads;\n \n-\t/* Allocate the maximun number of RX ring hardware descriptor. */\n+\t/* Allocate the maximum number of RX ring hardware descriptor. */\n \tlen = I40E_MAX_RING_DESC;\n \n \t/**\n@@ -2322,7 +2322,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t */\n \ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n \t\ttx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);\n-\t/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */\n+\t/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */\n \ttx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?\n \t\tnb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;\n \tif (tx_conf->tx_rs_thresh > 0)\n@@ -2991,7 +2991,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)\n \tif (rxq->max_pkt_len > buf_size)\n \t\tdev_data->scattered_rx = 1;\n \n-\t/* Init the RX tail regieter. */\n+\t/* Init the RX tail register. 
*/\n \tI40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n \n \treturn 0;\ndiff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c\nindex d0bf86df..f78ba994 100644\n--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c\n+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c\n@@ -430,7 +430,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\tdesc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);\n \t\tdesc_to_olflags_v(descs, &rx_pkts[pos]);\n \n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll((vec_ld(0,\n \t\t\t(vector unsigned long *)&staterr)[0]));\n \t\tnb_pkts_recd += var;\ndiff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c\nindex b951ea2d..50746853 100644\n--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c\n+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c\n@@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],\n \t\t\t\t\t      vreinterpretq_u8_u32(l3_l4e)));\n \t/* then we shift left 1 bit */\n \tl3_l4e = vshlq_n_u32(l3_l4e, 1);\n-\t/* we need to mask out the reduntant bits */\n+\t/* we need to mask out the redundant bits */\n \tl3_l4e = vandq_u32(l3_l4e, cksum_mask);\n \n \tvlan0 = vorrq_u32(vlan0, rss);\n@@ -416,7 +416,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq,\n \t\t\t\t\t    I40E_UINT16_BIT - 1));\n \t\tstat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);\n \n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tif (unlikely(stat == 0)) {\n \t\t\tnb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;\n \t\t} else {\ndiff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c\nindex 497b2404..3782e805 100644\n--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c\n+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c\n@@ -282,7 +282,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,\n \tl3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);\n \t/* then we shift left 1 bit */\n \tl3_l4e = _mm_slli_epi32(l3_l4e, 1);\n-\t/* we need to mask out the reduntant bits */\n+\t/* we need to mask out the redundant bits */\n \tl3_l4e = _mm_and_si128(l3_l4e, cksum_mask);\n \n \tvlan0 = _mm_or_si128(vlan0, rss);\n@@ -297,7 +297,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,\n \t\t__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,\n \t\t\t\t\t\t\t    descs, rx_pkts);\n #endif\n-\t\t/* OR in ol_flag bits after descriptor speicific extraction */\n+\t\t/* OR in ol_flag bits after descriptor specific extraction */\n \t\tvlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);\n \t}\n \n@@ -577,7 +577,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\t_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,\n \t\t\t\t pkt_mb1);\n \t\tdesc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);\n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll(_mm_cvtsi128_si64(staterr));\n \t\tnb_pkts_recd += var;\n \t\tif (likely(var != RTE_I40E_DESCS_PER_LOOP))\ndiff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c\nindex a492959b..35829a1e 100644\n--- a/drivers/net/i40e/rte_pmd_i40e.c\n+++ b/drivers/net/i40e/rte_pmd_i40e.c\n@@ -1427,7 +1427,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)\n \t/* Get all TCs' bandwidth. 
*/\n \tfor (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {\n \t\tif (veb->enabled_tc & BIT_ULL(i)) {\n-\t\t\t/* For rubust, if bandwidth is 0, use 1 instead. */\n+\t\t\t/* For robustness, if bandwidth is 0, use 1 instead. */\n \t\t\tif (veb->bw_info.bw_ets_share_credits[i])\n \t\t\t\tets_data.tc_bw_share_credits[i] =\n \t\t\t\t\tveb->bw_info.bw_ets_share_credits[i];\ndiff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c\nindex 377d7bc7..5944e0fd 100644\n--- a/drivers/net/iavf/iavf_ethdev.c\n+++ b/drivers/net/iavf/iavf_ethdev.c\n@@ -516,7 +516,7 @@ iavf_init_rss(struct iavf_adapter *adapter)\n \t\t\tj = 0;\n \t\tvf->rss_lut[i] = j;\n \t}\n-\t/* send virtchnnl ops to configure rss*/\n+\t/* send virtchnl ops to configure rss*/\n \tret = iavf_configure_rss_lut(adapter);\n \tif (ret)\n \t\treturn ret;\n@@ -831,7 +831,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,\n \t\t\t\t    \"vector %u are mapping to all Rx queues\",\n \t\t\t\t    vf->msix_base);\n \t\t} else {\n-\t\t\t/* If Rx interrupt is reuquired, and we can use\n+\t\t\t/* If Rx interrupt is required, and we can use\n \t\t\t * multi interrupts, then the vec is from 1\n \t\t\t */\n \t\t\tvf->nb_msix =\n@@ -1420,7 +1420,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,\n \t}\n \n \trte_memcpy(vf->rss_lut, lut, reta_size);\n-\t/* send virtchnnl ops to configure rss*/\n+\t/* send virtchnl ops to configure rss*/\n \tret = iavf_configure_rss_lut(adapter);\n \tif (ret) /* revert back */\n \t\trte_memcpy(vf->rss_lut, lut, reta_size);\n@@ -1753,7 +1753,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,\n \tstruct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);\n \tstruct iavf_vsi *vsi = &vf->vsi;\n \tstruct virtchnl_eth_stats *pstats = NULL;\n-\tstruct iavf_eth_xstats iavf_xtats = {{0}};\n+\tstruct iavf_eth_xstats iavf_xstats = {{0}};\n \n \tif (n < IAVF_NB_XSTATS)\n \t\treturn IAVF_NB_XSTATS;\n@@ -1766,15 +1766,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,\n \t\treturn 0;\n \n \tiavf_update_stats(vsi, pstats);\n-\tiavf_xtats.eth_stats = *pstats;\n+\tiavf_xstats.eth_stats = *pstats;\n \n \tif (iavf_ipsec_crypto_supported(adapter))\n-\t\tiavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);\n+\t\tiavf_dev_update_ipsec_xstats(dev, &iavf_xstats.ips_stats);\n \n \t/* loop over xstats array and values from pstats */\n \tfor (i = 0; i < IAVF_NB_XSTATS; i++) {\n \t\txstats[i].id = i;\n-\t\txstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +\n+\t\txstats[i].value = *(uint64_t *)(((char *)&iavf_xstats) +\n \t\t\trte_iavf_stats_strings[i].offset);\n \t}\n \ndiff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c\nindex 5e0888ea..d675e0fe 100644\n--- a/drivers/net/iavf/iavf_hash.c\n+++ b/drivers/net/iavf/iavf_hash.c\n@@ -814,7 +814,7 @@ iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,\n \n #define REFINE_PROTO_FLD(op, fld) \\\n \tVIRTCHNL_##op##_PROTO_HDR_FIELD(hdr, VIRTCHNL_PROTO_HDR_##fld)\n-#define REPALCE_PROTO_FLD(fld_1, fld_2) \\\n+#define REPLACE_PROTO_FLD(fld_1, fld_2) \\\n do { \\\n \tREFINE_PROTO_FLD(DEL, fld_1);\t\\\n \tREFINE_PROTO_FLD(ADD, fld_2);\t\\\n@@ -925,10 +925,10 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,\n \t\t\t}\n \t\t\tif (rss_type & RTE_ETH_RSS_L3_PRE64) {\n \t\t\t\tif (REFINE_PROTO_FLD(TEST, IPV6_SRC))\n-\t\t\t\t\tREPALCE_PROTO_FLD(IPV6_SRC,\n+\t\t\t\t\tREPLACE_PROTO_FLD(IPV6_SRC,\n \t\t\t\t\t\t\t  IPV6_PREFIX64_SRC);\n \t\t\t\tif (REFINE_PROTO_FLD(TEST, 
IPV6_DST))\n-\t\t\t\t\tREPALCE_PROTO_FLD(IPV6_DST,\n+\t\t\t\t\tREPLACE_PROTO_FLD(IPV6_DST,\n \t\t\t\t\t\t\t  IPV6_PREFIX64_DST);\n \t\t\t}\n \t\t\tbreak;\ndiff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c\nindex 884169e0..8174cbfc 100644\n--- a/drivers/net/iavf/iavf_ipsec_crypto.c\n+++ b/drivers/net/iavf/iavf_ipsec_crypto.c\n@@ -69,7 +69,7 @@ struct iavf_security_session {\n  *  16B - 3\n  *\n  * but we also need the IV Length for TSO to correctly calculate the total\n- * header length so placing it in the upper 6-bits here for easier reterival.\n+ * header length so placing it in the upper 6-bits here for easier retrieval.\n  */\n static inline uint8_t\n calc_ipsec_desc_iv_len_field(uint16_t iv_sz)\n@@ -448,7 +448,7 @@ sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,\n /**\n  * Send SA add virtual channel request to Inline IPsec driver.\n  *\n- * Inline IPsec driver expects SPI and destination IP adderss to be in host\n+ * Inline IPsec driver expects SPI and destination IP address to be in host\n  * order, but DPDK APIs are network order, therefore we need to do a htonl\n  * conversion of these parameters.\n  */\n@@ -726,7 +726,7 @@ iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,\n /**\n  * Send virtual channel security policy add request to IES driver.\n  *\n- * IES driver expects SPI and destination IP adderss to be in host\n+ * IES driver expects SPI and destination IP address to be in host\n  * order, but DPDK APIs are network order, therefore we need to do a htonl\n  * conversion of these parameters.\n  */\n@@ -994,7 +994,7 @@ iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,\n \trequest->req_id = (uint16_t)0xDEADBEEF;\n \n \t/**\n-\t * SA delete supports deletetion of 1-8 specified SA's or if the flag\n+\t * SA delete supports deletion of 1-8 specified SA's or if the flag\n \t * field is zero, all SA's associated with VF will be deleted.\n \t */\n \tif (sess) {\n@@ -1147,7 +1147,7 @@ iavf_ipsec_crypto_pkt_metadata_set(void *device,\n \tmd = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,\n \t\tstruct iavf_ipsec_crypto_pkt_metadata *);\n \n-\t/* Set immutatable metadata values from session template */\n+\t/* Set immutable metadata values from session template */\n \tmemcpy(md, &iavf_sess->pkt_metadata_template,\n \t\tsizeof(struct iavf_ipsec_crypto_pkt_metadata));\n \n@@ -1334,7 +1334,7 @@ update_aead_capabilities(struct rte_cryptodev_capabilities *scap,\n  * capabilities structure.\n  */\n int\n-iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx\n+iavf_ipsec_crypto_set_security_capabilities(struct iavf_security_ctx\n \t\t*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)\n {\n \tstruct rte_cryptodev_capabilities *capabilities;\n@@ -1355,7 +1355,7 @@ iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx\n \tcapabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;\n \n \t/**\n-\t * Iterate over each virtchl crypto capability by crypto type and\n+\t * Iterate over each virtchnl crypto capability by crypto type and\n \t * algorithm.\n \t */\n \tfor (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {\n@@ -1454,7 +1454,7 @@ iavf_ipsec_crypto_capabilities_get(void *device)\n \t/**\n \t * Update the security capabilities struct with the runtime discovered\n \t * crypto capabilities, except for last element of the array which is\n-\t * the null terminatation\n+\t * the null termination\n \t */\n \tfor (i = 0; i < ((sizeof(iavf_security_capabilities) /\n 
\t\t\tsizeof(iavf_security_capabilities[0])) - 1); i++) {\n@@ -1524,7 +1524,7 @@ iavf_security_init(struct iavf_adapter *adapter)\n \tif (rc)\n \t\treturn rc;\n \n-\treturn\tiavf_ipsec_crypto_set_security_capabililites(iavf_sctx,\n+\treturn\tiavf_ipsec_crypto_set_security_capabilities(iavf_sctx,\n \t\t\t&capabilities);\n }\n \ndiff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h\nindex 4e4c8798..921ca676 100644\n--- a/drivers/net/iavf/iavf_ipsec_crypto.h\n+++ b/drivers/net/iavf/iavf_ipsec_crypto.h\n@@ -73,7 +73,7 @@ enum iavf_ipsec_iv_len {\n };\n \n \n-/* IPsec Crypto Packet Metaday offload flags */\n+/* IPsec Crypto Packet Metadata offload flags */\n #define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN\t\t(0x1 << 0)\n #define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN\t\t\t(0x1 << 1)\n #define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS\t(0x1 << 2)\n@@ -118,8 +118,8 @@ int iavf_security_init(struct iavf_adapter *adapter);\n /**\n  * Set security capabilities\n  */\n-int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx\n-\t\t*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);\n+int iavf_ipsec_crypto_set_security_capabilities(struct iavf_security_ctx\n+\t\t*iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities);\n \n \n int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);\ndiff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c\nindex 154472c5..59623ac8 100644\n--- a/drivers/net/iavf/iavf_rxtx.c\n+++ b/drivers/net/iavf/iavf_rxtx.c\n@@ -648,8 +648,8 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\treturn -ENOMEM;\n \t}\n \n-\t/* Allocate the maximun number of RX ring hardware descriptor with\n-\t * a liitle more to support bulk allocate.\n+\t/* Allocate the maximum number of RX ring hardware descriptor with\n+\t * a little more to support bulk allocate.\n \t */\n \tlen = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;\n \tring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),\ndiff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c\nindex 1bac59bf..d582a363 100644\n--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c\n+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c\n@@ -159,7 +159,7 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],\n \tl3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);\n \t/* then we shift left 1 bit */\n \tl3_l4e = _mm_slli_epi32(l3_l4e, 1);\n-\t/* we need to mask out the reduntant bits */\n+\t/* we need to mask out the redundant bits */\n \tl3_l4e = _mm_and_si128(l3_l4e, cksum_mask);\n \n \tvlan0 = _mm_or_si128(vlan0, rss);\n@@ -613,7 +613,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\t_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,\n \t\t\t\t pkt_mb1);\n \t\tdesc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);\n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll(_mm_cvtsi128_si64(staterr));\n \t\tnb_pkts_recd += var;\n \t\tif (likely(var != IAVF_VPMD_DESCS_PER_LOOP))\ndiff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c\nindex 145b0598..76026916 100644\n--- a/drivers/net/iavf/iavf_vchnl.c\n+++ b/drivers/net/iavf/iavf_vchnl.c\n@@ -461,7 +461,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)\n \t    (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&\n \t     vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {\n \t\tPMD_INIT_LOG(ERR, \"VIRTCHNL API version should not be 
lower\"\n-\t\t\t     \" than (%u.%u) to support Adapative VF\",\n+\t\t\t     \" than (%u.%u) to support Adaptive VF\",\n \t\t\t     VIRTCHNL_VERSION_MAJOR_START,\n \t\t\t     VIRTCHNL_VERSION_MAJOR_START);\n \t\treturn -1;\n@@ -1487,7 +1487,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,\n \n \terr = iavf_execute_vf_cmd(adapter, &args, 0);\n \tif (err) {\n-\t\tPMD_DRV_LOG(ERR, \"fail to check flow direcotor rule\");\n+\t\tPMD_DRV_LOG(ERR, \"fail to check flow director rule\");\n \t\treturn err;\n \t}\n \ndiff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c\nindex cca1d7bf..8313a30b 100644\n--- a/drivers/net/ice/ice_dcf.c\n+++ b/drivers/net/ice/ice_dcf.c\n@@ -864,7 +864,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)\n \t\t\tj = 0;\n \t\thw->rss_lut[i] = j;\n \t}\n-\t/* send virtchnnl ops to configure rss*/\n+\t/* send virtchnl ops to configure rss*/\n \tret = ice_dcf_configure_rss_lut(hw);\n \tif (ret)\n \t\treturn ret;\ndiff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c\nindex 28f7f7fb..164d834a 100644\n--- a/drivers/net/ice/ice_dcf_ethdev.c\n+++ b/drivers/net/ice/ice_dcf_ethdev.c\n@@ -203,7 +203,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,\n \t\t\t\t    \"vector %u are mapping to all Rx queues\",\n \t\t\t\t    hw->msix_base);\n \t\t} else {\n-\t\t\t/* If Rx interrupt is reuquired, and we can use\n+\t\t\t/* If Rx interrupt is required, and we can use\n \t\t\t * multi interrupts, then the vec is from 1\n \t\t\t */\n \t\t\thw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 13a7a970..c9fd3de2 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -1264,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -1627,7 +1627,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t}\n \n \t/* At the beginning, only TC0. 
*/\n-\t/* What we need here is the maximam number of the TX queues.\n+\t/* What we need here is the maximum number of the TX queues.\n \t * Currently vsi->nb_qps means it.\n \t * Correct it if any change.\n \t */\n@@ -3576,7 +3576,7 @@ ice_dev_start(struct rte_eth_dev *dev)\n \t\tgoto rx_err;\n \t}\n \n-\t/* enable Rx interrput and mapping Rx queue to interrupt vector */\n+\t/* enable Rx interrupt and mapping Rx queue to interrupt vector */\n \tif (ice_rxq_intr_setup(dev))\n \t\treturn -EIO;\n \n@@ -3603,7 +3603,7 @@ ice_dev_start(struct rte_eth_dev *dev)\n \n \tice_dev_set_link_up(dev);\n \n-\t/* Call get_link_info aq commond to enable/disable LSE */\n+\t/* Call get_link_info aq command to enable/disable LSE */\n \tice_link_update(dev, 0);\n \n \tpf->adapter_stopped = false;\n@@ -5395,7 +5395,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ice_hw_port struct */\n+\t/* Get individual stats from ice_hw_port struct */\n \tfor (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {\n \t\txstats[count].value =\n \t\t\t*(uint64_t *)((char *)hw_stats +\n@@ -5426,7 +5426,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ice_hw_port struct */\n+\t/* Get individual stats from ice_hw_port struct */\n \tfor (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {\n \t\tstrlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,\n \t\t\tsizeof(xstats_names[count].name));\ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex f6d8564a..f59e83d3 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -1118,7 +1118,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->proto_xtr = pf->proto_xtr != NULL ?\n \t\t\t pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;\n \n-\t/* Allocate the maximun number of RX ring hardware descriptor. */\n+\t/* Allocate the maximum number of RX ring hardware descriptor. */\n \tlen = ICE_MAX_RING_DESC;\n \n \t/**\n@@ -1248,7 +1248,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,\n \ttx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?\n \t\t\t\t    tx_conf->tx_free_thresh :\n \t\t\t\t    ICE_DEFAULT_TX_FREE_THRESH);\n-\t/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */\n+\t/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */\n \ttx_rs_thresh =\n \t\t(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?\n \t\t\tnb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;\n@@ -1714,7 +1714,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)\n \t\trxdp[i].read.pkt_addr = dma_addr;\n \t}\n \n-\t/* Update rx tail regsiter */\n+\t/* Update rx tail register */\n \tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);\n \n \trxq->rx_free_trigger =\n@@ -1976,7 +1976,7 @@ ice_recv_scattered_pkts(void *rx_queue,\n \t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n \t * register. 
Update the RDT with the value of the last processed RX\n \t * descriptor minus 1, to guarantee that the RDT register is never\n-\t * equal to the RDH register, which creates a \"full\" ring situtation\n+\t * equal to the RDH register, which creates a \"full\" ring situation\n \t * from the hardware point of view.\n \t */\n \tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n@@ -3117,7 +3117,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq,\n \tice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));\n \ttxq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));\n \n-\t/* Determin if RS bit needs to be set */\n+\t/* Determine if RS bit needs to be set */\n \tif (txq->tx_tail > txq->tx_next_rs) {\n \t\ttxr[txq->tx_next_rs].cmd_type_offset_bsz |=\n \t\t\trte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<\ndiff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c\nindex 6cd44c58..fd94cedd 100644\n--- a/drivers/net/ice/ice_rxtx_vec_sse.c\n+++ b/drivers/net/ice/ice_rxtx_vec_sse.c\n@@ -202,7 +202,7 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],\n \t__m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6);\n \t__m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask);\n \tflags = _mm_or_si128(l3_l4_flags, l4_outer_flags);\n-\t/* we need to mask out the reduntant bits introduced by RSS or\n+\t/* we need to mask out the redundant bits introduced by RSS or\n \t * VLAN fields.\n \t */\n \tflags = _mm_and_si128(flags, cksum_mask);\n@@ -566,7 +566,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\t_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,\n \t\t\t\t pkt_mb0);\n \t\tice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);\n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll(_mm_cvtsi128_si64(staterr));\n \t\tnb_pkts_recd += var;\n \t\tif (likely(var != ICE_DESCS_PER_LOOP))\ndiff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c\nindex ed29c00d..5b9251f1 100644\n--- a/drivers/net/ice/ice_switch_filter.c\n+++ b/drivers/net/ice/ice_switch_filter.c\n@@ -1649,11 +1649,11 @@ ice_switch_parse_action(struct ice_pf *pf,\n \tstruct ice_vsi *vsi = pf->main_vsi;\n \tstruct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;\n \tconst struct rte_flow_action_queue *act_q;\n-\tconst struct rte_flow_action_rss *act_qgrop;\n+\tconst struct rte_flow_action_rss *act_qgroup;\n \tuint16_t base_queue, i;\n \tconst struct rte_flow_action *action;\n \tenum rte_flow_action_type action_type;\n-\tuint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {\n+\tuint16_t valid_qgroup_number[MAX_QGRP_NUM_TYPE] = {\n \t\t 2, 4, 8, 16, 32, 64, 128};\n \n \tbase_queue = pf->base_queue + vsi->base_queue;\n@@ -1662,30 +1662,30 @@ ice_switch_parse_action(struct ice_pf *pf,\n \t\taction_type = action->type;\n \t\tswitch (action_type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n-\t\t\tact_qgrop = action->conf;\n-\t\t\tif (act_qgrop->queue_num <= 1)\n+\t\t\tact_qgroup = action->conf;\n+\t\t\tif (act_qgroup->queue_num <= 1)\n \t\t\t\tgoto error;\n \t\t\trule_info->sw_act.fltr_act =\n \t\t\t\tICE_FWD_TO_QGRP;\n \t\t\trule_info->sw_act.fwd_id.q_id =\n-\t\t\t\tbase_queue + act_qgrop->queue[0];\n+\t\t\t\tbase_queue + act_qgroup->queue[0];\n \t\t\tfor (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {\n-\t\t\t\tif (act_qgrop->queue_num ==\n-\t\t\t\t\tvalid_qgrop_number[i])\n+\t\t\t\tif (act_qgroup->queue_num ==\n+\t\t\t\t\tvalid_qgroup_number[i])\n 
\t\t\t\t\tbreak;\n \t\t\t}\n \t\t\tif (i == MAX_QGRP_NUM_TYPE)\n \t\t\t\tgoto error;\n-\t\t\tif ((act_qgrop->queue[0] +\n-\t\t\t\tact_qgrop->queue_num) >\n+\t\t\tif ((act_qgroup->queue[0] +\n+\t\t\t\tact_qgroup->queue_num) >\n \t\t\t\tdev_data->nb_rx_queues)\n \t\t\t\tgoto error1;\n-\t\t\tfor (i = 0; i < act_qgrop->queue_num - 1; i++)\n-\t\t\t\tif (act_qgrop->queue[i + 1] !=\n-\t\t\t\t\tact_qgrop->queue[i] + 1)\n+\t\t\tfor (i = 0; i < act_qgroup->queue_num - 1; i++)\n+\t\t\t\tif (act_qgroup->queue[i + 1] !=\n+\t\t\t\t\tact_qgroup->queue[i] + 1)\n \t\t\t\t\tgoto error2;\n \t\t\trule_info->sw_act.qgrp_size =\n-\t\t\t\tact_qgrop->queue_num;\n+\t\t\t\tact_qgroup->queue_num;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n \t\t\tact_q = action->conf;\ndiff --git a/drivers/net/igc/igc_filter.c b/drivers/net/igc/igc_filter.c\nindex 51fcabfb..bff98df2 100644\n--- a/drivers/net/igc/igc_filter.c\n+++ b/drivers/net/igc/igc_filter.c\n@@ -167,7 +167,7 @@ igc_tuple_filter_lookup(const struct igc_adapter *igc,\n \t\t/* search the filter array */\n \t\tfor (; i < IGC_MAX_NTUPLE_FILTERS; i++) {\n \t\t\tif (igc->ntuple_filters[i].hash_val) {\n-\t\t\t\t/* compare the hase value */\n+\t\t\t\t/* compare the hash value */\n \t\t\t\tif (ntuple->hash_val ==\n \t\t\t\t\tigc->ntuple_filters[i].hash_val)\n \t\t\t\t\t/* filter be found, return index */\ndiff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c\nindex 339b0c9a..e48d5df1 100644\n--- a/drivers/net/igc/igc_txrx.c\n+++ b/drivers/net/igc/igc_txrx.c\n@@ -2099,7 +2099,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)\n \t\t\t\tsw_ring[tx_id].mbuf = NULL;\n \t\t\t\tsw_ring[tx_id].last_id = tx_id;\n \n-\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\t/* Move to next segment. */\n \t\t\t\ttx_id = sw_ring[tx_id].next_id;\n \t\t\t} while (tx_id != tx_next);\n \n@@ -2133,7 +2133,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)\n \t\t\t * Walk the list and find the next mbuf, if any.\n \t\t\t */\n \t\t\tdo {\n-\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\t/* Move to next segment. 
*/\n \t\t\t\ttx_id = sw_ring[tx_id].next_id;\n \n \t\t\t\tif (sw_ring[tx_id].mbuf)\ndiff --git a/drivers/net/ionic/ionic_if.h b/drivers/net/ionic/ionic_if.h\nindex 693b44d7..45bad9b0 100644\n--- a/drivers/net/ionic/ionic_if.h\n+++ b/drivers/net/ionic/ionic_if.h\n@@ -2068,7 +2068,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;\n  * enum ionic_fw_control_oper - FW control operations\n  * @IONIC_FW_RESET:     Reset firmware\n  * @IONIC_FW_INSTALL:   Install firmware\n- * @IONIC_FW_ACTIVATE:  Acticate firmware\n+ * @IONIC_FW_ACTIVATE:  Activate firmware\n  */\n enum ionic_fw_control_oper {\n \tIONIC_FW_RESET\t\t= 0,\n@@ -2091,7 +2091,7 @@ struct ionic_fw_control_cmd {\n };\n \n /**\n- * struct ionic_fw_control_comp - Firmware control copletion\n+ * struct ionic_fw_control_comp - Firmware control completion\n  * @status:     Status of the command (enum ionic_status_code)\n  * @comp_index: Index in the descriptor ring for which this is the completion\n  * @slot:       Slot where the firmware was installed\n@@ -2878,7 +2878,7 @@ struct ionic_doorbell {\n  *                    and @identity->intr_coal_div to convert from\n  *                    usecs to device units:\n  *\n- *                      coal_init = coal_usecs * coal_mutl / coal_div\n+ *                      coal_init = coal_usecs * coal_mult / coal_div\n  *\n  *                    When an interrupt is sent the interrupt\n  *                    coalescing timer current value\ndiff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.c b/drivers/net/ipn3ke/ipn3ke_ethdev.c\nindex 964506c6..014e438d 100644\n--- a/drivers/net/ipn3ke/ipn3ke_ethdev.c\n+++ b/drivers/net/ipn3ke/ipn3ke_ethdev.c\n@@ -483,7 +483,7 @@ static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)\n \t\t\t\t\tRTE_CACHE_LINE_SIZE,\n \t\t\t\t\tafu_dev->device.numa_node);\n \t\tif (!hw) {\n-\t\t\tIPN3KE_AFU_PMD_ERR(\"failed to allocate hardwart data\");\n+\t\t\tIPN3KE_AFU_PMD_ERR(\"failed to allocate hardware data\");\n \t\t\t\tretval = -ENOMEM;\n \t\t\t\treturn -ENOMEM;\n \t\t}\ndiff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.h b/drivers/net/ipn3ke/ipn3ke_ethdev.h\nindex 041f13d9..58fcc50c 100644\n--- a/drivers/net/ipn3ke/ipn3ke_ethdev.h\n+++ b/drivers/net/ipn3ke/ipn3ke_ethdev.h\n@@ -223,7 +223,7 @@ struct ipn3ke_hw_cap {\n };\n \n /**\n- * Strucute to store private data for each representor instance\n+ * Structure to store private data for each representor instance\n  */\n struct ipn3ke_rpst {\n \tTAILQ_ENTRY(ipn3ke_rpst) next;       /**< Next in device list. 
*/\n@@ -237,7 +237,7 @@ struct ipn3ke_rpst {\n \tuint16_t i40e_pf_eth_port_id;\n \tstruct rte_eth_link ori_linfo;\n \tstruct ipn3ke_tm_internals tm;\n-\t/**< Private data store of assocaiated physical function */\n+\t/**< Private data store of associated physical function */\n \tstruct rte_ether_addr mac_addr;\n };\n \ndiff --git a/drivers/net/ipn3ke/ipn3ke_flow.c b/drivers/net/ipn3ke/ipn3ke_flow.c\nindex f5867ca0..66ae31a5 100644\n--- a/drivers/net/ipn3ke/ipn3ke_flow.c\n+++ b/drivers/net/ipn3ke/ipn3ke_flow.c\n@@ -1299,7 +1299,7 @@ int ipn3ke_flow_init(void *dev)\n \tIPN3KE_AFU_PMD_DEBUG(\"IPN3KE_CLF_LKUP_ENABLE: %x\\n\", data);\n \n \n-\t/* configure rx parse config, settings associatied with VxLAN */\n+\t/* configure rx parse config, settings associated with VxLAN */\n \tIPN3KE_MASK_WRITE_REG(hw,\n \t\t\tIPN3KE_CLF_RX_PARSE_CFG,\n \t\t\t0,\ndiff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c\nindex de325c7d..c9dde1d8 100644\n--- a/drivers/net/ipn3ke/ipn3ke_representor.c\n+++ b/drivers/net/ipn3ke/ipn3ke_representor.c\n@@ -2282,7 +2282,7 @@ ipn3ke_rpst_xstats_get\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_hw_port */\n+\t/* Get individual stats from ipn3ke_rpst_hw_port */\n \tfor (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {\n \t\txstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) +\n \t\t\tipn3ke_rpst_hw_port_strings[i].offset);\n@@ -2290,7 +2290,7 @@ ipn3ke_rpst_xstats_get\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_rxq_pri */\n+\t/* Get individual stats from ipn3ke_rpst_rxq_pri */\n \tfor (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {\n \t\tfor (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {\n \t\t\txstats[count].value =\n@@ -2302,7 +2302,7 @@ ipn3ke_rpst_xstats_get\n \t\t}\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_txq_prio */\n+\t/* Get individual stats from ipn3ke_rpst_txq_prio */\n \tfor (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {\n \t\tfor (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {\n \t\t\txstats[count].value =\n@@ -2340,7 +2340,7 @@ __rte_unused unsigned int limit)\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_hw_port */\n+\t/* Get individual stats from ipn3ke_rpst_hw_port */\n \tfor (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {\n \t\tsnprintf(xstats_names[count].name,\n \t\t\t sizeof(xstats_names[count].name),\n@@ -2349,7 +2349,7 @@ __rte_unused unsigned int limit)\n \t\tcount++;\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_rxq_pri */\n+\t/* Get individual stats from ipn3ke_rpst_rxq_pri */\n \tfor (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {\n \t\tfor (prio = 0; prio < 8; prio++) {\n \t\t\tsnprintf(xstats_names[count].name,\n@@ -2361,7 +2361,7 @@ __rte_unused unsigned int limit)\n \t\t}\n \t}\n \n-\t/* Get individiual stats from ipn3ke_rpst_txq_prio */\n+\t/* Get individual stats from ipn3ke_rpst_txq_prio */\n \tfor (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {\n \t\tfor (prio = 0; prio < 8; prio++) {\n \t\t\tsnprintf(xstats_names[count].name,\ndiff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c\nindex 6a9b98fd..5172f21f 100644\n--- a/drivers/net/ipn3ke/ipn3ke_tm.c\n+++ b/drivers/net/ipn3ke/ipn3ke_tm.c\n@@ -1956,7 +1956,7 @@ ipn3ke_tm_show(struct rte_eth_dev *dev)\n }\n \n static void\n-ipn3ke_tm_show_commmit(struct rte_eth_dev *dev)\n+ipn3ke_tm_show_commit(struct rte_eth_dev *dev)\n {\n \tstruct ipn3ke_tm_internals *tm = 
IPN3KE_DEV_PRIVATE_TO_TM(dev);\n \tuint32_t tm_id;\n@@ -2013,7 +2013,7 @@ ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,\n \t\t\t\t\tNULL,\n \t\t\t\t\trte_strerror(EBUSY));\n \n-\tipn3ke_tm_show_commmit(dev);\n+\tipn3ke_tm_show_commit(dev);\n \n \tstatus = ipn3ke_tm_hierarchy_commit_check(dev, error);\n \tif (status) {\ndiff --git a/drivers/net/ipn3ke/meson.build b/drivers/net/ipn3ke/meson.build\nindex 4bf73980..104d2f58 100644\n--- a/drivers/net/ipn3ke/meson.build\n+++ b/drivers/net/ipn3ke/meson.build\n@@ -8,7 +8,7 @@ if is_windows\n endif\n \n #\n-# Add the experimenatal APIs called from this PMD\n+# Add the experimental APIs called from this PMD\n #  rte_eth_switch_domain_alloc()\n #  rte_eth_dev_create()\n #  rte_eth_dev_destroy()\ndiff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c\nindex 67ced6c7..94f34a29 100644\n--- a/drivers/net/ixgbe/ixgbe_bypass.c\n+++ b/drivers/net/ixgbe/ixgbe_bypass.c\n@@ -11,7 +11,7 @@\n \n #define\tBYPASS_STATUS_OFF_MASK\t3\n \n-/* Macros to check for invlaid function pointers. */\n+/* Macros to check for invalid function pointers. */\n #define\tFUNC_PTR_OR_ERR_RET(func, retval) do {              \\\n \tif ((func) == NULL) {                               \\\n \t\tPMD_DRV_LOG(ERR, \"%s:%d function not supported\", \\\ndiff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h\nindex 8eb77339..6ef965db 100644\n--- a/drivers/net/ixgbe/ixgbe_bypass_api.h\n+++ b/drivers/net/ixgbe/ixgbe_bypass_api.h\n@@ -135,7 +135,7 @@ static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)\n  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.\n  *\n  * If we send a write we can't be sure it took until we can read back\n- * that same register.  It can be a problem as some of the feilds may\n+ * that same register.  It can be a problem as some of the fields may\n  * for valid reasons change between the time wrote the register and\n  * we read it again to verify.  
So this function check everything we\n  * can check and then assumes it worked.\n@@ -189,7 +189,7 @@ static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)\n }\n \n /**\n- *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.\n+ *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.\n  *\n  *  @hw: pointer to hardware structure\n  *  @cmd: The control word we are setting.\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c\nindex fe61dba8..49bd0abd 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.c\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.c\n@@ -2375,7 +2375,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)\n \tif (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)\n \t\tdev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;\n \n-\t/* multipe queue mode checking */\n+\t/* multiple queue mode checking */\n \tret  = ixgbe_check_mq_mode(dev);\n \tif (ret != 0) {\n \t\tPMD_DRV_LOG(ERR, \"ixgbe_check_mq_mode fails with %d.\",\n@@ -2603,7 +2603,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n-\t/* confiugre msix for sleep until rx interrupt */\n+\t/* configure msix for sleep until rx interrupt */\n \tixgbe_configure_msix(dev);\n \n \t/* initialize transmission unit */\n@@ -2907,7 +2907,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)\n \tif (hw->mac.type == ixgbe_mac_82599EB) {\n #ifdef RTE_LIBRTE_IXGBE_BYPASS\n \t\tif (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {\n-\t\t\t/* Not suported in bypass mode */\n+\t\t\t/* Not supported in bypass mode */\n \t\t\tPMD_INIT_LOG(ERR, \"Set link up is not supported \"\n \t\t\t\t     \"by device id 0x%x\", hw->device_id);\n \t\t\treturn -ENOTSUP;\n@@ -2938,7 +2938,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)\n \tif (hw->mac.type == ixgbe_mac_82599EB) {\n #ifdef RTE_LIBRTE_IXGBE_BYPASS\n \t\tif (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {\n-\t\t\t/* Not suported in bypass mode */\n+\t\t\t/* Not supported in bypass mode */\n \t\t\tPMD_INIT_LOG(ERR, \"Set link down is not supported \"\n \t\t\t\t     \"by device id 0x%x\", hw->device_id);\n \t\t\treturn -ENOTSUP;\n@@ -4603,7 +4603,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -4659,7 +4659,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)\n  * @param handle\n  *  Pointer to interrupt handle.\n  * @param param\n- *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ *  The address of parameter (struct rte_eth_dev *) registered before.\n  *\n  * @return\n  *  void\n@@ -5921,7 +5921,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)\n \t/* Configure all RX queues of VF */\n \tfor (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {\n \t\t/* Force all queue use vector 0,\n-\t\t * as IXGBE_VF_MAXMSIVECOTR = 1\n+\t\t * as IXGBE_VF_MAXMSIVECTOR = 1\n \t\t */\n \t\tixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);\n \t\trte_intr_vec_list_index_set(intr_handle, q_idx,\n@@ -6256,7 +6256,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,\n  * @param\n  * dev: Pointer to struct rte_eth_dev.\n  * index: the index the filter allocates.\n- * filter: ponter to the filter that will be added.\n+ * filter: pointer to the filter that will be added.\n  * rx_queue: the queue id the filter assigned to.\n  *\n  * 
@return\n@@ -6872,7 +6872,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev)\n \t/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \tIXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);\n \n-\t/* Stop incrementating the System Time registers. */\n+\t/* Stop incrementing the System Time registers. */\n \tIXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);\n \n \treturn 0;\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex 83e8b5e5..69e0e82a 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -68,7 +68,7 @@\n #define IXGBE_LPBK_NONE   0x0 /* Default value. Loopback is disabled. */\n #define IXGBE_LPBK_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */\n /* X540-X550 specific loopback operations */\n-#define IXGBE_MII_AUTONEG_ENABLE        0x1000 /* Auto-negociation enable (default = 1) */\n+#define IXGBE_MII_AUTONEG_ENABLE        0x1000 /* Auto-negotiation enable (default = 1) */\n \n #define IXGBE_MAX_JUMBO_FRAME_SIZE      0x2600 /* Maximum Jumbo frame size. */\n \ndiff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c\nindex 78940478..834c1b3f 100644\n--- a/drivers/net/ixgbe/ixgbe_fdir.c\n+++ b/drivers/net/ixgbe/ixgbe_fdir.c\n@@ -390,7 +390,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)\n \n \t\tswitch (info->mask.tunnel_type_mask) {\n \t\tcase 0:\n-\t\t\t/* Mask turnnel type */\n+\t\t\t/* Mask tunnel type */\n \t\t\tfdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;\n \t\t\tbreak;\n \t\tcase 1:\ndiff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c\nindex bdc9d479..2fa2e02b 100644\n--- a/drivers/net/ixgbe/ixgbe_flow.c\n+++ b/drivers/net/ixgbe/ixgbe_flow.c\n@@ -135,7 +135,7 @@ const struct rte_flow_action *next_no_void_action(\n }\n \n /**\n- * Please aware there's an asumption for all the parsers.\n+ * Please be aware there's an assumption for all the parsers.\n  * rte_flow_item is using big endian, rte_flow_attr and\n  * rte_flow_action are using CPU order.\n  * Because the pattern is used to describe the packets,\n@@ -3261,7 +3261,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,\n \n /**\n  * Check if the flow rule is supported by ixgbe.\n- * It only checkes the format. Don't guarantee the rule can be programmed into\n+ * It only checks the format; it doesn't guarantee the rule can be programmed into\n  * the HW. 
Because there can be no enough room for the rule.\n  */\n static int\ndiff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c\nindex 944c9f23..c353ae33 100644\n--- a/drivers/net/ixgbe/ixgbe_ipsec.c\n+++ b/drivers/net/ixgbe/ixgbe_ipsec.c\n@@ -310,7 +310,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,\n \t\t\treturn -1;\n \t\t}\n \n-\t\t/* Disable and clear Rx SPI and key table table entryes*/\n+\t\t/* Disable and clear Rx SPI and key table entries */\n \t\treg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);\n \t\tIXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);\n \t\tIXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);\ndiff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c\nindex 9f1bd0a6..c73833b7 100644\n--- a/drivers/net/ixgbe/ixgbe_pf.c\n+++ b/drivers/net/ixgbe/ixgbe_pf.c\n@@ -242,7 +242,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)\n \t/* PFDMA Tx General Switch Control Enables VMDQ loopback */\n \tIXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);\n \n-\t/* clear VMDq map to perment rar 0 */\n+\t/* clear VMDq map to permanent rar 0 */\n \thw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);\n \n \t/* clear VMDq map to scan rar 127 */\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex d7c80d42..99e928a2 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -1954,7 +1954,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tnb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n@@ -2303,7 +2303,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,\n \t * register.\n \t * Update the RDT with the value of the last processed RX descriptor\n \t * minus 1, to guarantee that the RDT register is never equal to the\n-\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * RDH register, which creates a \"full\" ring situation from the\n \t * hardware point of view...\n \t */\n \tif (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {\n@@ -2666,7 +2666,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t */\n \ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n \t\t\ttx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);\n-\t/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */\n+\t/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */\n \ttx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?\n \t\t\tnb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;\n \tif (tx_conf->tx_rs_thresh > 0)\n@@ -4831,7 +4831,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)\n \t\t\t\t     dev->data->port_id);\n \t\t\tdev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;\n \t\t} else {\n-\t\t\tPMD_INIT_LOG(DEBUG, \"Using Regualr (non-vector, \"\n+\t\t\tPMD_INIT_LOG(DEBUG, \"Using Regular (non-vector, \"\n \t\t\t\t\t    \"single allocation) \"\n \t\t\t\t\t    \"Scattered Rx callback \"\n \t\t\t\t\t    \"(port=%d).\",\n@@ -5170,7 +5170,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t/*\n \t * Setup the Checksum Register.\n \t * Disable Full-Packet Checksum which is mutually exclusive with RSS.\n-\t * Enable IP/L4 checkum computation by hardware if 
requested to do so.\n+\t * Enable IP/L4 checksum computation by hardware if requested to do so.\n \t */\n \trxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);\n \trxcsum |= IXGBE_RXCSUM_PCSD;\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c\nindex 1eed9494..c56f76b3 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c\n@@ -562,7 +562,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \n \t\tdesc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);\n \n-\t\t/* C.4 calc avaialbe number of desc */\n+\t\t/* C.4 calc available number of desc */\n \t\tvar = __builtin_popcountll(_mm_cvtsi128_si64(staterr));\n \t\tnb_pkts_recd += var;\n \t\tif (likely(var != RTE_IXGBE_DESCS_PER_LOOP))\ndiff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c\nindex 079cf012..42f48a68 100644\n--- a/drivers/net/memif/memif_socket.c\n+++ b/drivers/net/memif/memif_socket.c\n@@ -726,7 +726,7 @@ memif_msg_receive(struct memif_control_channel *cc)\n \t\tbreak;\n \tcase MEMIF_MSG_TYPE_INIT:\n \t\t/*\n-\t\t * This cc does not have an interface asociated with it.\n+\t\t * This cc does not have an interface associated with it.\n \t\t * If suitable interface is found it will be assigned here.\n \t\t */\n \t\tret = memif_msg_receive_init(cc, &msg);\ndiff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c\nindex e3d523af..59cb5a82 100644\n--- a/drivers/net/memif/rte_eth_memif.c\n+++ b/drivers/net/memif/rte_eth_memif.c\n@@ -1026,7 +1026,7 @@ memif_regions_init(struct rte_eth_dev *dev)\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t} else {\n-\t\t/* create one memory region contaning rings and buffers */\n+\t\t/* create one memory region containing rings and buffers */\n \t\tret = memif_region_init_shm(dev, /* has buffers */ 1);\n \t\tif (ret < 0)\n \t\t\treturn ret;\ndiff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h\nindex 2d0c512f..4023a476 100644\n--- a/drivers/net/mlx4/mlx4.h\n+++ b/drivers/net/mlx4/mlx4.h\n@@ -74,7 +74,7 @@ enum mlx4_mp_req_type {\n \tMLX4_MP_REQ_STOP_RXTX,\n };\n \n-/* Pameters for IPC. */\n+/* Parameters for IPC. 
*/\n struct mlx4_mp_param {\n \tenum mlx4_mp_req_type type;\n \tint port_id;\ndiff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c\nindex d606ec8c..ce74c51c 100644\n--- a/drivers/net/mlx4/mlx4_ethdev.c\n+++ b/drivers/net/mlx4/mlx4_ethdev.c\n@@ -752,7 +752,7 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n  *   Pointer to Ethernet device structure.\n  *\n  * @return\n- *   alwasy 0 on success\n+ *   always 0 on success\n  */\n int\n mlx4_stats_reset(struct rte_eth_dev *dev)\ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex c29fe3d9..36f0fbf0 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -112,7 +112,7 @@ static struct mlx5_indexed_pool_config icfg[] = {\n  *   Pointer to RQ channel object, which includes the channel fd\n  *\n  * @param[out] fd\n- *   The file descriptor (representing the intetrrupt) used in this channel.\n+ *   The file descriptor (representing the interrupt) used in this channel.\n  *\n  * @return\n  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.\n@@ -1743,7 +1743,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tpriv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);\n \tif (!priv->drop_queue.hrxq)\n \t\tgoto error;\n-\t/* Port representor shares the same max prioirity with pf port. */\n+\t/* Port representor shares the same max priority with pf port. */\n \tif (!priv->sh->flow_priority_check_flag) {\n \t\t/* Supported Verbs flow priority number detection. */\n \t\terr = mlx5_flow_discover_priorities(eth_dev);\n@@ -2300,7 +2300,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,\n \t\t\t\t\t\t/*\n \t\t\t\t\t\t * Force standalone bonding\n \t\t\t\t\t\t * device for ROCE LAG\n-\t\t\t\t\t\t * confgiurations.\n+\t\t\t\t\t\t * configurations.\n \t\t\t\t\t\t */\n \t\t\t\t\t\tlist[ns].info.master = 0;\n \t\t\t\t\t\tlist[ns].info.representor = 0;\n@@ -2637,7 +2637,7 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev)\n \t\t}\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Probe of PCI device \" PCI_PRI_FMT \" \"\n-\t\t\t\t\"aborted due to proding failure of PF %u\",\n+\t\t\t\t\"aborted due to probing failure of PF %u\",\n \t\t\t\tpci_dev->addr.domain, pci_dev->addr.bus,\n \t\t\t\tpci_dev->addr.devid, pci_dev->addr.function,\n \t\t\t\teth_da.ports[p]);\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex aa5f313c..66a2d9b5 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -350,7 +350,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"rte_flow_ipool\",\n \t},\n-\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {\n+\t[MLX5_IPOOL_RSS_EXPANSION_FLOW_ID] = {\n \t\t.size = 0,\n \t\t.need_lock = 1,\n \t\t.type = \"mlx5_flow_rss_id_ipool\",\n@@ -1642,7 +1642,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t/*\n \t * Free the shared context in last turn, because the cleanup\n \t * routines above may use some shared fields, like\n-\t * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing\n+\t * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving\n \t * ifindex if Netlink fails.\n \t */\n \tmlx5_free_shared_dev_ctx(priv->sh);\n@@ -1962,7 +1962,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)\n \t\tif (tmp != MLX5_RCM_NONE &&\n \t\t    tmp != MLX5_RCM_LIGHT &&\n \t\t    tmp != MLX5_RCM_AGGR) {\n-\t\t\tDRV_LOG(ERR, \"Unrecognize %s: \\\"%s\\\"\", key, val);\n+\t\t\tDRV_LOG(ERR, \"Unrecognized %s: \\\"%s\\\"\", 
key, val);\n \t\t\trte_errno = EINVAL;\n \t\t\treturn -rte_errno;\n \t\t}\n@@ -2177,17 +2177,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)\n \t\tbreak;\n \t}\n \tif (sh->dv_mark_mask && sh->dv_mark_mask != mark)\n-\t\tDRV_LOG(WARNING, \"metadata MARK mask mismatche %08X:%08X\",\n+\t\tDRV_LOG(WARNING, \"metadata MARK mask mismatch %08X:%08X\",\n \t\t\t\t sh->dv_mark_mask, mark);\n \telse\n \t\tsh->dv_mark_mask = mark;\n \tif (sh->dv_meta_mask && sh->dv_meta_mask != meta)\n-\t\tDRV_LOG(WARNING, \"metadata META mask mismatche %08X:%08X\",\n+\t\tDRV_LOG(WARNING, \"metadata META mask mismatch %08X:%08X\",\n \t\t\t\t sh->dv_meta_mask, meta);\n \telse\n \t\tsh->dv_meta_mask = meta;\n \tif (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)\n-\t\tDRV_LOG(WARNING, \"metadata reg_c0 mask mismatche %08X:%08X\",\n+\t\tDRV_LOG(WARNING, \"metadata reg_c0 mask mismatch %08X:%08X\",\n \t\t\t\t sh->dv_meta_mask, reg_c0);\n \telse\n \t\tsh->dv_regc0_mask = reg_c0;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 84665310..61287800 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -73,7 +73,7 @@ enum mlx5_ipool_index {\n \tMLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */\n \tMLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */\n \tMLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */\n-\tMLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */\n+\tMLX5_IPOOL_RSS_EXPANSION_FLOW_ID, /* Pool for Queue/RSS flow ID. */\n \tMLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */\n \tMLX5_IPOOL_MTR_POLICY, /* Pool for meter policy resource. */\n \tMLX5_IPOOL_MAX,\n@@ -751,7 +751,7 @@ struct mlx5_flow_meter_policy {\n \t/* drop action for red color. */\n \tuint16_t sub_policy_num;\n \t/* Count sub policy tables, 3 bits per domain. */\n-\tstruct mlx5_flow_meter_sub_policy **sub_policys[MLX5_MTR_DOMAIN_MAX];\n+\tstruct mlx5_flow_meter_sub_policy **sub_policies[MLX5_MTR_DOMAIN_MAX];\n \t/* Sub policy table array must be the end of struct. */\n };\n \n@@ -977,7 +977,7 @@ struct mlx5_flow_id_pool {\n \tuint32_t base_index;\n \t/**< The next index that can be used without any free elements. */\n \tuint32_t *curr; /**< Pointer to the index to pop. */\n-\tuint32_t *last; /**< Pointer to the last element in the empty arrray. */\n+\tuint32_t *last; /**< Pointer to the last element in the empty array. */\n \tuint32_t max_id; /**< Maximum id can be allocated from the pool. */\n };\n \n@@ -1014,7 +1014,7 @@ struct mlx5_dev_txpp {\n \tvoid *pp; /* Packet pacing context. */\n \tuint16_t pp_id; /* Packet pacing context index. */\n \tuint16_t ts_n; /* Number of captured timestamps. */\n-\tuint16_t ts_p; /* Pointer to statisticks timestamp. */\n+\tuint16_t ts_p; /* Pointer to statistics timestamp. */\n \tstruct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */\n \tstruct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */\n \tuint32_t sync_lost:1; /* ci/timestamp synchronization lost. */\n@@ -1118,7 +1118,7 @@ struct mlx5_flex_parser_devx {\n \tuint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];\n };\n \n-/* Pattern field dscriptor - how to translate flex pattern into samples. */\n+/* Pattern field descriptor - how to translate flex pattern into samples. */\n __extension__\n struct mlx5_flex_pattern_field {\n \tuint16_t width:6;\n@@ -1169,7 +1169,7 @@ struct mlx5_dev_ctx_shared {\n \t/* Shared DV/DR flow data section. */\n \tuint32_t dv_meta_mask; /* flow META metadata supported mask. */\n \tuint32_t dv_mark_mask; /* flow MARK metadata supported mask. 
*/\n-\tuint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */\n+\tuint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */\n \tvoid *fdb_domain; /* FDB Direct Rules name space handle. */\n \tvoid *rx_domain; /* RX Direct Rules name space handle. */\n \tvoid *tx_domain; /* TX Direct Rules name space handle. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex f34e4b88..7e5ce5a2 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -1206,7 +1206,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)\n }\n \n /**\n- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive\n+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device\n  * flow.\n  *\n  * @param[in] dev\n@@ -3008,7 +3008,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,\n \tif ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)\n \t\treturn rte_flow_error_set\n \t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,\n-\t\t\t\"Geneve TLV opt length exceeeds the limit (31)\");\n+\t\t\t\"Geneve TLV opt length exceeds the limit (31)\");\n \t/* Check if class type and length masks are full. */\n \tif (full_mask.option_class != mask->option_class ||\n \t    full_mask.option_type != mask->option_type ||\n@@ -3957,7 +3957,7 @@ find_graph_root(uint32_t rss_level)\n  *  subflow.\n  *\n  * @param[in] dev_flow\n- *   Pointer the created preifx subflow.\n+ *   Pointer the created prefix subflow.\n  *\n  * @return\n  *   The layers get from prefix subflow.\n@@ -4284,7 +4284,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)\n \t\t[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },\n \t};\n \n-\t/* Fill the register fileds in the flow. */\n+\t/* Fill the register fields in the flow. */\n \tret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);\n \tif (ret < 0)\n \t\treturn NULL;\n@@ -4353,7 +4353,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)\n \t/*\n \t * The copy Flows are not included in any list. There\n \t * ones are referenced from other Flows and can not\n-\t * be applied, removed, deleted in ardbitrary order\n+\t * be applied, removed, deleted in arbitrary order\n \t * by list traversing.\n \t */\n \tmcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,\n@@ -4810,7 +4810,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,\n \t/*\n \t * If dev_flow is as one of the suffix flow, some actions in suffix\n \t * flow may need some user defined item layer flags, and pass the\n-\t * Metadate rxq mark flag to suffix flow as well.\n+\t * Metadata rxq mark flag to suffix flow as well.\n \t */\n \tif (flow_split_info->prefix_layers)\n \t\tdev_flow->handle->layers = flow_split_info->prefix_layers;\n@@ -4933,7 +4933,7 @@ get_meter_sub_policy(struct rte_eth_dev *dev,\n \t\t\tattr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :\n \t\t\t\t(attr->egress ? 
MLX5_MTR_DOMAIN_EGRESS :\n \t\t\t\t\t\tMLX5_MTR_DOMAIN_INGRESS);\n-\t\tsub_policy = policy->sub_policys[mtr_domain][0];\n+\t\tsub_policy = policy->sub_policies[mtr_domain][0];\n \t}\n \tif (!sub_policy)\n \t\trte_flow_error_set(error, EINVAL,\n@@ -5301,7 +5301,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,\n \t\t * IDs.\n \t\t */\n \t\tmlx5_ipool_malloc(priv->sh->ipool\n-\t\t\t\t  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);\n+\t\t\t\t  [MLX5_IPOOL_RSS_EXPANSION_FLOW_ID], &flow_id);\n \t\tif (!flow_id)\n \t\t\treturn rte_flow_error_set(error, ENOMEM,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -5359,7 +5359,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,\n  * @param[out] error\n  *   Perform verbose error reporting if not NULL.\n  * @param[in] encap_idx\n- *   The encap action inndex.\n+ *   The encap action index.\n  *\n  * @return\n  *   0 on success, negative value otherwise\n@@ -5628,7 +5628,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev,\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t\tmlx5_ipool_malloc(priv->sh->ipool\n-\t\t\t\t  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);\n+\t\t\t\t  [MLX5_IPOOL_RSS_EXPANSION_FLOW_ID], &tag_id);\n \t\t*set_tag = (struct mlx5_rte_flow_action_set_tag) {\n \t\t\t.id = ret,\n \t\t\t.data = tag_id,\n@@ -5899,7 +5899,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,\n \t * These ones are included into parent flow list and will be destroyed\n \t * by flow_drv_destroy.\n \t */\n-\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n+\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANSION_FLOW_ID],\n \t\t\tqrss_id);\n \tmlx5_free(ext_actions);\n \treturn ret;\n@@ -6884,7 +6884,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n  * @param type\n  *   Flow type to be flushed.\n  * @param active\n- *   If flushing is called avtively.\n+ *   If flushing is called actively.\n  */\n void\n mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n@@ -8531,7 +8531,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,\n  *   Perform verbose error reporting if not NULL. PMDs initialize this\n  *   structure in case of error only.\n  * @return\n- *   0 on success, a nagative value otherwise.\n+ *   0 on success, a negative value otherwise.\n  */\n int\n mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,\n@@ -9009,7 +9009,7 @@ mlx5_get_tof(const struct rte_flow_item *item,\n }\n \n /**\n- * tunnel offload functionalilty is defined for DV environment only\n+ * tunnel offload functionality is defined for DV environment only\n  */\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n __extension__\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 1f54649c..8c131d61 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -598,7 +598,7 @@ struct mlx5_flow_tbl_data_entry {\n \tconst struct mlx5_flow_tunnel *tunnel;\n \tuint32_t group_id;\n \tuint32_t external:1;\n-\tuint32_t tunnel_offload:1; /* Tunnel offlod table or not. */\n+\tuint32_t tunnel_offload:1; /* Tunnel offload table or not. */\n \tuint32_t is_egress:1; /**< Egress table. */\n \tuint32_t is_transfer:1; /**< Transfer table. */\n \tuint32_t dummy:1; /**<  DR table. */\n@@ -696,8 +696,8 @@ struct mlx5_flow_handle {\n \t/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */\n \tvoid *drv_flow; /**< pointer to driver flow object. */\n \tuint32_t split_flow_id:27; /**< Sub flow unique match flow id. 
*/\n-\tuint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */\n-\tuint32_t mark:1; /**< Metadate rxq mark flag. */\n+\tuint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */\n+\tuint32_t mark:1; /**< Metadata rxq mark flag. */\n \tuint32_t fate_action:3; /**< Fate action type. */\n \tuint32_t flex_item; /**< referenced Flex Item bitmask. */\n \tunion {\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex ddf4328d..cd01e0c3 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -981,13 +981,13 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \tMLX5_SET(conn_track_aso, desg, sack_permitted, profile->selective_ack);\n \tMLX5_SET(conn_track_aso, desg, challenged_acked,\n \t\t profile->challenge_ack_passed);\n-\t/* Heartbeat, retransmission_counter, retranmission_limit_exceeded: 0 */\n+\t/* Heartbeat, retransmission_counter, retransmission_limit_exceeded: 0 */\n \tMLX5_SET(conn_track_aso, desg, heartbeat, 0);\n \tMLX5_SET(conn_track_aso, desg, max_ack_window,\n \t\t profile->max_ack_window);\n \tMLX5_SET(conn_track_aso, desg, retransmission_counter, 0);\n-\tMLX5_SET(conn_track_aso, desg, retranmission_limit_exceeded, 0);\n-\tMLX5_SET(conn_track_aso, desg, retranmission_limit,\n+\tMLX5_SET(conn_track_aso, desg, retransmission_limit_exceeded, 0);\n+\tMLX5_SET(conn_track_aso, desg, retransmission_limit,\n \t\t profile->retransmission_limit);\n \tMLX5_SET(conn_track_aso, desg, reply_direction_tcp_scale,\n \t\t profile->reply_dir.scale);\n@@ -1312,7 +1312,7 @@ mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n \tprofile->max_ack_window = MLX5_GET(conn_track_aso, wdata,\n \t\t\t\t\t   max_ack_window);\n \tprofile->retransmission_limit = MLX5_GET(conn_track_aso, wdata,\n-\t\t\t\t\t\t retranmission_limit);\n+\t\t\t\t\t\t retransmission_limit);\n \tprofile->last_window = MLX5_GET(conn_track_aso, wdata, last_win);\n \tprofile->last_direction = MLX5_GET(conn_track_aso, wdata, last_dir);\n \tprofile->last_index = (enum rte_flow_conntrack_tcp_last_index)\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 3da122cb..f43781f7 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -2032,7 +2032,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,\n \t\tif (reg == REG_NON)\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n-\t\t\t\t\t\"unavalable extended metadata register\");\n+\t\t\t\t\t\"unavailable extended metadata register\");\n \t\tif (reg == REG_B)\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n@@ -3205,7 +3205,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,\n \tif (reg == REG_NON)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n-\t\t\t\t\t  \"unavalable extended metadata register\");\n+\t\t\t\t\t  \"unavailable extended metadata register\");\n \tif (reg != REG_A && reg != REG_B) {\n \t\tstruct mlx5_priv *priv = dev->data->dev_private;\n \n@@ -5145,7 +5145,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,\n  *   Pointer to error structure.\n  *\n  * @return\n- *   0 on success, a negative errno value otherwise and rte_ernno is set.\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,\n@@ -7858,7 +7858,7 
@@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t * - Explicit decap action is prohibited by the tunnel offload API.\n \t * - Drop action in tunnel steer rule is prohibited by the API.\n \t * - Application cannot use MARK action because it's value can mask\n-\t *   tunnel default miss nitification.\n+\t *   tunnel default miss notification.\n \t * - JUMP in tunnel match rule has no support in current PMD\n \t *   implementation.\n \t * - TAG & META are reserved for future uses.\n@@ -9184,7 +9184,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,\n \t\t\tgeneve_opt_v->option_type &&\n \t\t\tgeneve_opt_resource->length ==\n \t\t\tgeneve_opt_v->option_len) {\n-\t\t\t/* We already have GENVE TLV option obj allocated. */\n+\t\t\t/* We already have GENEVE TLV option obj allocated. */\n \t\t\t__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,\n \t\t\t\t\t   __ATOMIC_RELAXED);\n \t\t} else {\n@@ -10226,7 +10226,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)\n \t * Check flow matching criteria first, subtract misc5/4 length if flow\n \t * doesn't own misc5/4 parameters. In some old rdma-core releases,\n \t * misc5/4 are not supported, and matcher creation failure is expected\n-\t * w/o subtration. If misc5 is provided, misc4 must be counted in since\n+\t * w/o subtraction. If misc5 is provided, misc4 must be counted in since\n \t * misc5 is right after misc4.\n \t */\n \tif (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {\n@@ -11425,7 +11425,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)\n \t\t\tgoto error;\n \t\t}\n \t}\n-\t/* create a dest array actioin */\n+\t/* create a dest array action */\n \tret = mlx5_os_flow_dr_create_flow_action_dest_array\n \t\t\t\t\t\t(domain,\n \t\t\t\t\t\t resource->num_of_dest,\n@@ -14538,7 +14538,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\telse if (dev_handle->split_flow_id &&\n \t\t    !dev_handle->is_meter_flow_id)\n \t\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n+\t\t\t\t\t[MLX5_IPOOL_RSS_EXPANSION_FLOW_ID],\n \t\t\t\t\tdev_handle->split_flow_id);\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],\n \t\t\t   tmp_idx);\n@@ -15311,7 +15311,7 @@ flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &\n \t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n \t\tfor (j = 0; j < sub_policy_num; j++) {\n-\t\t\tsub_policy = mtr_policy->sub_policys[i][j];\n+\t\t\tsub_policy = mtr_policy->sub_policies[i][j];\n \t\t\tif (sub_policy)\n \t\t\t\t__flow_dv_destroy_sub_policy_rules(dev,\n \t\t\t\t\t\t\t\t   sub_policy);\n@@ -15649,7 +15649,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,\n \t\t\t\t\t(1 << MLX5_SCALE_FLOW_GROUP_BIT),\n \t\t\t\t};\n \t\t\t\tstruct mlx5_flow_meter_sub_policy *sub_policy =\n-\t\t\t\t\tmtr_policy->sub_policys[domain][0];\n+\t\t\t\t\tmtr_policy->sub_policies[domain][0];\n \n \t\t\t\tif (i >= MLX5_MTR_RTE_COLORS)\n \t\t\t\t\treturn -rte_mtr_error_set(error,\n@@ -16504,7 +16504,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,\n \t\t\t\t\t\tnext_fm->policy_id, NULL);\n \t\t\t\t\tMLX5_ASSERT(next_policy);\n \t\t\t\t\tnext_sub_policy =\n-\t\t\t\t\tnext_policy->sub_policys[domain][0];\n+\t\t\t\t\tnext_policy->sub_policies[domain][0];\n \t\t\t\t}\n \t\t\t\ttbl_data =\n \t\t\t\t\tcontainer_of(next_sub_policy->tbl_rsc,\n@@ -16559,7 +16559,7 @@ flow_dv_create_policy_rules(struct rte_eth_dev 
*dev,\n \t\t\tcontinue;\n \t\t/* Prepare actions list and create policy rules. */\n \t\tif (__flow_dv_create_policy_acts_rules(dev, mtr_policy,\n-\t\t\tmtr_policy->sub_policys[i][0], i)) {\n+\t\t\tmtr_policy->sub_policies[i][0], i)) {\n \t\t\tDRV_LOG(ERR, \"Failed to create policy action \"\n \t\t\t\t\"list per domain.\");\n \t\t\treturn -1;\n@@ -16898,7 +16898,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \t\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n \t\t\tif (rss_desc[i] &&\n \t\t\t    hrxq_idx[i] !=\n-\t\t\t    mtr_policy->sub_policys[domain][j]->rix_hrxq[i])\n+\t\t\t    mtr_policy->sub_policies[domain][j]->rix_hrxq[i])\n \t\t\t\tbreak;\n \t\t}\n \t\tif (i >= MLX5_MTR_RTE_COLORS) {\n@@ -16910,13 +16910,13 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \t\t\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++)\n \t\t\t\tmlx5_hrxq_release(dev, hrxq_idx[i]);\n \t\t\t*is_reuse = true;\n-\t\t\treturn mtr_policy->sub_policys[domain][j];\n+\t\t\treturn mtr_policy->sub_policies[domain][j];\n \t\t}\n \t}\n \t/* Create sub policy. */\n-\tif (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {\n+\tif (!mtr_policy->sub_policies[domain][0]->rix_hrxq[0]) {\n \t\t/* Reuse the first pre-allocated sub_policy. */\n-\t\tsub_policy = mtr_policy->sub_policys[domain][0];\n+\t\tsub_policy = mtr_policy->sub_policies[domain][0];\n \t\tsub_policy_idx = sub_policy->idx;\n \t} else {\n \t\tsub_policy = mlx5_ipool_zmalloc\n@@ -16967,7 +16967,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \t\t\t\"rules for ingress domain.\");\n \t\tgoto rss_sub_policy_error;\n \t}\n-\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\tif (sub_policy != mtr_policy->sub_policies[domain][0]) {\n \t\ti = (mtr_policy->sub_policy_num >>\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n \t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n@@ -16975,7 +16975,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \t\t\tDRV_LOG(ERR, \"No free sub-policy slot.\");\n \t\t\tgoto rss_sub_policy_error;\n \t\t}\n-\t\tmtr_policy->sub_policys[domain][i] = sub_policy;\n+\t\tmtr_policy->sub_policies[domain][i] = sub_policy;\n \t\ti++;\n \t\tmtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));\n@@ -16989,11 +16989,11 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n rss_sub_policy_error:\n \tif (sub_policy) {\n \t\t__flow_dv_destroy_sub_policy_rules(dev, sub_policy);\n-\t\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\tif (sub_policy != mtr_policy->sub_policies[domain][0]) {\n \t\t\ti = (mtr_policy->sub_policy_num >>\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n \t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n-\t\t\tmtr_policy->sub_policys[domain][i] = NULL;\n+\t\t\tmtr_policy->sub_policies[domain][i] = NULL;\n \t\t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n \t\t\t\t\tsub_policy->idx);\n \t\t}\n@@ -17078,11 +17078,11 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,\n \t\tsub_policy = sub_policies[--j];\n \t\tmtr_policy = sub_policy->main_policy;\n \t\t__flow_dv_destroy_sub_policy_rules(dev, sub_policy);\n-\t\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\tif (sub_policy != mtr_policy->sub_policies[domain][0]) {\n \t\t\tsub_policy_num = (mtr_policy->sub_policy_num >>\n \t\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n \t\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n-\t\t\tmtr_policy->sub_policys[domain][sub_policy_num - 1] 
=\n+\t\t\tmtr_policy->sub_policies[domain][sub_policy_num - 1] =\n \t\t\t\t\t\t\t\t\tNULL;\n \t\t\tsub_policy_num--;\n \t\t\tmtr_policy->sub_policy_num &=\n@@ -17157,7 +17157,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,\n \tif (!next_fm->drop_cnt)\n \t\tgoto exit;\n \tcolor_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);\n-\tsub_policy = mtr_policy->sub_policys[domain][0];\n+\tsub_policy = mtr_policy->sub_policies[domain][0];\n \tfor (i = 0; i < RTE_COLORS; i++) {\n \t\tbool rule_exist = false;\n \t\tstruct mlx5_meter_policy_action_container *act_cnt;\n@@ -17184,7 +17184,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,\n \t\tnext_policy = mlx5_flow_meter_policy_find(dev,\n \t\t\t\t\t\tnext_fm->policy_id, NULL);\n \t\tMLX5_ASSERT(next_policy);\n-\t\tnext_sub_policy = next_policy->sub_policys[domain][0];\n+\t\tnext_sub_policy = next_policy->sub_policies[domain][0];\n \t\ttbl_data = container_of(next_sub_policy->tbl_rsc,\n \t\t\t\t\tstruct mlx5_flow_tbl_data_entry, tbl);\n \t\tact_cnt = &mtr_policy->act_cnt[i];\n@@ -17277,13 +17277,13 @@ flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,\n \t\t\tnew_policy_num = sub_policy_num;\n \t\t\tfor (j = 0; j < sub_policy_num; j++) {\n \t\t\t\tsub_policy =\n-\t\t\t\t\tmtr_policy->sub_policys[domain][j];\n+\t\t\t\t\tmtr_policy->sub_policies[domain][j];\n \t\t\t\tif (sub_policy) {\n \t\t\t\t\t__flow_dv_destroy_sub_policy_rules(dev,\n \t\t\t\t\t\tsub_policy);\n \t\t\t\tif (sub_policy !=\n-\t\t\t\t\tmtr_policy->sub_policys[domain][0]) {\n-\t\t\t\t\tmtr_policy->sub_policys[domain][j] =\n+\t\t\t\t\tmtr_policy->sub_policies[domain][0]) {\n+\t\t\t\t\tmtr_policy->sub_policies[domain][j] =\n \t\t\t\t\t\t\t\tNULL;\n \t\t\t\t\tmlx5_ipool_free\n \t\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n@@ -17303,7 +17303,7 @@ flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,\n \t\t\t}\n \t\t\tbreak;\n \t\tcase MLX5_FLOW_FATE_QUEUE:\n-\t\t\tsub_policy = mtr_policy->sub_policys[domain][0];\n+\t\t\tsub_policy = mtr_policy->sub_policies[domain][0];\n \t\t\t__flow_dv_destroy_sub_policy_rules(dev,\n \t\t\t\t\t\t\t   sub_policy);\n \t\t\tbreak;\ndiff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c\nindex 64867dc9..9413d4d8 100644\n--- a/drivers/net/mlx5/mlx5_flow_flex.c\n+++ b/drivers/net/mlx5/mlx5_flow_flex.c\n@@ -205,7 +205,7 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,\n  * @param dev\n  *   Ethernet device to translate flex item on.\n  * @param[in, out] matcher\n- *   Flow matcher to confgiure\n+ *   Flow matcher to configure\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -457,7 +457,7 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,\n \t\tif (field->offset_shift > 15 || field->offset_shift < 0)\n \t\t\treturn rte_flow_error_set\n \t\t\t\t(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,\n-\t\t\t\t \"header length field shift exceeeds limit\");\n+\t\t\t\t \"header length field shift exceeds limit\");\n \t\tnode->header_length_field_shift\t= field->offset_shift;\n \t\tnode->header_length_field_offset = field->offset_base;\n \t}\ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex f4a7b697..be693e10 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -251,7 +251,7 @@ mlx5_flow_meter_xir_man_exp_calc(int64_t xir, uint8_t *man, uint8_t *exp)\n \tuint8_t _exp = 0;\n \tuint64_t m, e;\n \n-\t/* Special case xir == 0 ? 
both exp and matissa are 0. */\n+\t/* Special case xir == 0 ? both exp and mantissa are 0. */\n \tif (xir == 0) {\n \t\t*man = 0;\n \t\t*exp = 0;\n@@ -287,7 +287,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)\n \tint _exp;\n \tdouble _man;\n \n-\t/* Special case xbs == 0 ? both exp and matissa are 0. */\n+\t/* Special case xbs == 0 ? both exp and mantissa are 0. */\n \tif (xbs == 0) {\n \t\t*man = 0;\n \t\t*exp = 0;\n@@ -305,7 +305,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)\n  * Fill the prm meter parameter.\n  *\n  * @param[in,out] fmp\n- *   Pointer to meter profie to be converted.\n+ *   Pointer to meter profile to be converted.\n  * @param[out] error\n  *   Pointer to the error structure.\n  *\n@@ -696,7 +696,7 @@ __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,\n \t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n \t\tif (sub_policy_num) {\n \t\t\tfor (j = 0; j < sub_policy_num; j++) {\n-\t\t\t\tsub_policy = mtr_policy->sub_policys[i][j];\n+\t\t\t\tsub_policy = mtr_policy->sub_policies[i][j];\n \t\t\t\tif (sub_policy)\n \t\t\t\t\tmlx5_ipool_free\n \t\t\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n@@ -847,10 +847,10 @@ mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,\n \t\t\tpolicy_idx = sub_policy_idx;\n \t\t\tsub_policy->main_policy_id = 1;\n \t\t}\n-\t\tmtr_policy->sub_policys[i] =\n+\t\tmtr_policy->sub_policies[i] =\n \t\t\t(struct mlx5_flow_meter_sub_policy **)\n \t\t\t((uint8_t *)mtr_policy + policy_size);\n-\t\tmtr_policy->sub_policys[i][0] = sub_policy;\n+\t\tmtr_policy->sub_policies[i][0] = sub_policy;\n \t\tsub_policy_num = (mtr_policy->sub_policy_num >>\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &\n \t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n@@ -1101,7 +1101,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,\n \t\t\tif (ret)\n \t\t\t\treturn ret;\n \t\t}\n-\t\t/* Update succeedded modify meter parameters. */\n+\t\t/* Update succeeded modify meter parameters. 
*/\n \t\tif (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE)\n \t\t\tfm->active_state = !!active_state;\n \t}\n@@ -1615,7 +1615,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev,\n \t\treturn -rte_mtr_error_set(error, -ret,\n \t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS,\n \t\t\t\t\t  NULL, \"Failed to update meter\"\n-\t\t\t\t\t  \" parmeters in hardware.\");\n+\t\t\t\t\t  \" parameters in hardware.\");\n \t}\n \told_fmp->ref_cnt--;\n \tfmp->ref_cnt++;\ndiff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c\nindex e8215f73..c8d2f407 100644\n--- a/drivers/net/mlx5/mlx5_rx.c\n+++ b/drivers/net/mlx5/mlx5_rx.c\n@@ -178,7 +178,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n  *   Pointer to the device structure.\n  *\n  * @param rx_queue_id\n- *   Rx queue identificatior.\n+ *   Rx queue identification.\n  *\n  * @param mode\n  *   Pointer to the burts mode information.\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex f77d42de..be5f4da1 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -2152,7 +2152,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)\n  *   Number of queues in the array.\n  *\n  * @return\n- *   1 if all queues in indirection table match 0 othrwise.\n+ *   1 if all queues in indirection table match 0 otherwise.\n  */\n static int\n mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,\n@@ -2586,7 +2586,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,\n \t\tif (hrxq->standalone) {\n \t\t\t/*\n \t\t\t * Replacement of indirection table unsupported for\n-\t\t\t * stanalone hrxq objects (used by shared RSS).\n+\t\t\t * standalone hrxq objects (used by shared RSS).\n \t\t\t */\n \t\t\trte_errno = ENOTSUP;\n \t\t\treturn -rte_errno;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\nindex 423e2295..f6e434c1 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n@@ -1230,7 +1230,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t\t\tuint32_t mask = rxq->flow_meta_port_mask;\n \t\t\tuint32_t metadata;\n \n-\t\t\t/* This code is subject for futher optimization. */\n+\t\t\t/* This code is subject for further optimization. */\n \t\t\tmetadata = rte_be_to_cpu_32\n \t\t\t\t(cq[pos].flow_table_metadata) & mask;\n \t\t\t*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\nindex b1d16baa..f7bbde4e 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n@@ -839,7 +839,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t\t\t}\n \t\t}\n \t\tif (rxq->dynf_meta) {\n-\t\t\t/* This code is subject for futher optimization. */\n+\t\t\t/* This code is subject for further optimization. */\n \t\t\tint32_t offs = rxq->flow_meta_offset;\n \t\t\tuint32_t mask = rxq->flow_meta_port_mask;\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\nindex f3d83838..185d2695 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n@@ -772,7 +772,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t\t\t}\n \t\t}\n \t\tif (rxq->dynf_meta) {\n-\t\t\t/* This code is subject for futher optimization. */\n+\t\t\t/* This code is subject for further optimization. 
*/\n \t\t\tint32_t offs = rxq->flow_meta_offset;\n \t\t\tuint32_t mask = rxq->flow_meta_port_mask;\n \ndiff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c\nindex 5492d64c..fd2cf209 100644\n--- a/drivers/net/mlx5/mlx5_tx.c\n+++ b/drivers/net/mlx5/mlx5_tx.c\n@@ -728,7 +728,7 @@ mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n  *   Pointer to the device structure.\n  *\n  * @param tx_queue_id\n- *   Tx queue identificatior.\n+ *   Tx queue identification.\n  *\n  * @param mode\n  *   Pointer to the burts mode information.\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex cf3db894..e2dcbafc 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -55,7 +55,7 @@ extern int mlx5_logtype;\n \n /*\n  * For the case which data is linked with sequence increased index, the\n- * array table will be more efficiect than hash table once need to serarch\n+ * array table will be more efficient than hash table once need to search\n  * one data entry in large numbers of entries. Since the traditional hash\n  * tables has fixed table size, when huge numbers of data saved to the hash\n  * table, it also comes lots of hash conflict.\ndiff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c b/drivers/net/mlx5/windows/mlx5_flow_os.c\nindex c4d57907..7bb4c459 100644\n--- a/drivers/net/mlx5/windows/mlx5_flow_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_flow_os.c\n@@ -400,7 +400,7 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)\n \t\t/*\n \t\t * set_specific_workspace when current value is NULL\n \t\t * can happen only once per thread, mark this thread in\n-\t\t * linked list to be able to release reasorces later on.\n+\t\t * linked list to be able to release resources later on.\n \t\t */\n \t\terr = mlx5_add_workspace_to_list(data);\n \t\tif (err) {\ndiff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c\nindex dec4b923..f1437249 100644\n--- a/drivers/net/mlx5/windows/mlx5_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_os.c\n@@ -226,7 +226,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)\n  *   Pointer to RQ channel object, which includes the channel fd\n  *\n  * @param[out] fd\n- *   The file descriptor (representing the intetrrupt) used in this channel.\n+ *   The file descriptor (representing the interrupt) used in this channel.\n  *\n  * @return\n  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c\nindex 10fe6d82..eef016aa 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.c\n+++ b/drivers/net/mvneta/mvneta_ethdev.c\n@@ -247,7 +247,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t    (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {\n \t\tmru = mbuf_data_size - MRVL_NETA_PKT_OFFS;\n \t\tmtu = MRVL_NETA_MRU_TO_MTU(mru);\n-\t\tMVNETA_LOG(WARNING, \"MTU too big, max MTU possible limitted by\"\n+\t\tMVNETA_LOG(WARNING, \"MTU too big, max MTU possible limited by\"\n \t\t\t\" current mbuf size: %u. 
Set MTU to %u, MRU to %u\",\n \t\t\tmbuf_data_size, mtu, mru);\n \t}\ndiff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c\nindex 9c7fe13f..f86701d2 100644\n--- a/drivers/net/mvpp2/mrvl_ethdev.c\n+++ b/drivers/net/mvpp2/mrvl_ethdev.c\n@@ -579,7 +579,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tif (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {\n \t\tmru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;\n \t\tmtu = MRVL_PP2_MRU_TO_MTU(mru);\n-\t\tMRVL_LOG(WARNING, \"MTU too big, max MTU possible limitted \"\n+\t\tMRVL_LOG(WARNING, \"MTU too big, max MTU possible limited \"\n \t\t\t\"by current mbuf size: %u. Set MTU to %u, MRU to %u\",\n \t\t\tmbuf_data_size, mtu, mru);\n \t}\ndiff --git a/drivers/net/mvpp2/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c\nindex dbfc3b5d..99f0ee56 100644\n--- a/drivers/net/mvpp2/mrvl_qos.c\n+++ b/drivers/net/mvpp2/mrvl_qos.c\n@@ -301,7 +301,7 @@ get_entry_values(const char *entry, uint8_t *tab,\n }\n \n /**\n- * Parse Traffic Class'es mapping configuration.\n+ * Parse Traffic Classes mapping configuration.\n  *\n  * @param file Config file handle.\n  * @param port Which port to look for.\n@@ -736,7 +736,7 @@ mrvl_get_cfg(const char *key __rte_unused, const char *path, void *extra_args)\n \n \t\t/* MRVL_TOK_START_HDR replaces MRVL_TOK_DSA_MODE parameter.\n \t\t * MRVL_TOK_DSA_MODE will be supported for backward\n-\t\t * compatibillity.\n+\t\t * compatibility.\n \t\t */\n \t\tentry = rte_cfgfile_get_entry(file, sec_name,\n \t\t\t\tMRVL_TOK_START_HDR);\ndiff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c\nindex 89dbba6c..a29ac18f 100644\n--- a/drivers/net/netvsc/hn_nvs.c\n+++ b/drivers/net/netvsc/hn_nvs.c\n@@ -229,7 +229,7 @@ hn_nvs_conn_rxbuf(struct hn_data *hv)\n \thv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;\n \n \t/*\n-\t * Pimary queue's rxbuf_info is not allocated at creation time.\n+\t * Primary queue's rxbuf_info is not allocated at creation time.\n \t * Now we can allocate it after we figure out the slotcnt.\n \t */\n \thv->primary->rxbuf_info = rte_calloc(\"HN_RXBUF_INFO\",\ndiff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c\nindex 028f176c..50ca1710 100644\n--- a/drivers/net/netvsc/hn_rxtx.c\n+++ b/drivers/net/netvsc/hn_rxtx.c\n@@ -578,7 +578,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,\n \t\trte_iova_t iova;\n \n \t\t/*\n-\t\t * Build an external mbuf that points to recveive area.\n+\t\t * Build an external mbuf that points to receive area.\n \t\t * Use refcount to handle multiple packets in same\n \t\t * receive buffer section.\n \t\t */\n@@ -1031,7 +1031,7 @@ hn_dev_rx_queue_count(void *rx_queue)\n  * returns:\n  *  - -EINVAL               - offset outside of ring\n  *  - RTE_ETH_RX_DESC_AVAIL - no data available yet\n- *  - RTE_ETH_RX_DESC_DONE  - data is waiting in stagin ring\n+ *  - RTE_ETH_RX_DESC_DONE  - data is waiting in staging ring\n  */\n int hn_dev_rx_queue_status(void *arg, uint16_t offset)\n {\ndiff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c\nindex fead8eba..ebb9c601 100644\n--- a/drivers/net/netvsc/hn_vf.c\n+++ b/drivers/net/netvsc/hn_vf.c\n@@ -103,7 +103,7 @@ static void hn_remove_delayed(void *args)\n \tstruct rte_device *dev = rte_eth_devices[port_id].device;\n \tint ret;\n \n-\t/* Tell VSP to switch data path to synthentic */\n+\t/* Tell VSP to switch data path to synthetic */\n \thn_vf_remove(hv);\n \n \tPMD_DRV_LOG(NOTICE, \"Start to remove port %d\", port_id);\ndiff --git 
a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h\nindex 0e03948e..394a7628 100644\n--- a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h\n+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h\n@@ -63,7 +63,7 @@\n  * Wildcard indicating a CPP read or write action\n  *\n  * The action used will be either read or write depending on whether a read or\n- * write instruction/call is performed on the NFP_CPP_ID.  It is recomended that\n+ * write instruction/call is performed on the NFP_CPP_ID.  It is recommended that\n  * the RW action is used even if all actions to be performed on a NFP_CPP_ID are\n  * known to be only reads or writes. Doing so will in many cases save NFP CPP\n  * internal software resources.\n@@ -405,7 +405,7 @@ int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);\n  * @param chip_family Chip family ID\n  * @param s           A string of format \"iX.anything\" or \"iX\"\n  * @param endptr      If non-NULL, *endptr will point to the trailing\n- *                    striong after the ME ID part of the string, which\n+ *                    string after the ME ID part of the string, which\n  *                    is either an empty string or the first character\n  *                    after the separating period.\n  * @return            The island ID on succes, -1 on error.\n@@ -425,7 +425,7 @@ int nfp_idstr2island(int chip_family, const char *s, const char **endptr);\n  * @param chip_family Chip family ID\n  * @param s           A string of format \"meX.anything\" or \"meX\"\n  * @param endptr      If non-NULL, *endptr will point to the trailing\n- *                    striong after the ME ID part of the string, which\n+ *                    string after the ME ID part of the string, which\n  *                    is either an empty string or the first character\n  *                    after the separating period.\n  * @return            The ME number on succes, -1 on error.\ndiff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c\nindex f9104938..37799af5 100644\n--- a/drivers/net/nfp/nfpcore/nfp_cppcore.c\n+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c\n@@ -202,7 +202,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,\n  * @address:    start address on CPP target\n  * @size:   size of area\n  *\n- * Allocate and initilizae a CPP area structure, and lock it down so\n+ * Allocate and initialize a CPP area structure, and lock it down so\n  * that it can be accessed directly.\n  *\n  * NOTE: @address and @size must be 32-bit aligned values.\ndiff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h\nindex c9c7b0d0..e74cdeb1 100644\n--- a/drivers/net/nfp/nfpcore/nfp_nsp.h\n+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h\n@@ -272,7 +272,7 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);\n  * @br_primary:   branch id of primary bootloader\n  * @br_secondary: branch id of secondary bootloader\n  * @br_nsp:       branch id of NSP\n- * @primary:      version of primarary bootloader\n+ * @primary:      version of primary bootloader\n  * @secondary:    version id of secondary bootloader\n  * @nsp:          version id of NSP\n  * @sensor_mask:  mask of present sensors available on NIC\ndiff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c\nindex dd41fa4d..7b5630fd 100644\n--- a/drivers/net/nfp/nfpcore/nfp_resource.c\n+++ b/drivers/net/nfp/nfpcore/nfp_resource.c\n@@ -207,7 +207,7 @@ nfp_resource_acquire(struct 
nfp_cpp *cpp, const char *name)\n  * nfp_resource_release() - Release a NFP Resource handle\n  * @res:\tNFP Resource handle\n  *\n- * NOTE: This function implictly unlocks the resource handle\n+ * NOTE: This function implicitly unlocks the resource handle\n  */\n void\n nfp_resource_release(struct nfp_resource *res)\ndiff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c\nindex cb7d83db..2feca2ed 100644\n--- a/drivers/net/nfp/nfpcore/nfp_rtsym.c\n+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c\n@@ -236,7 +236,7 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)\n  * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol\n  * @rtbl:\tNFP RTsym table\n  * @name:\tSymbol name\n- * @error:\tPoniter to error code (optional)\n+ * @error:\tPointer to error code (optional)\n  *\n  * Lookup a symbol, map, read it and return it's value. Value of the symbol\n  * will be interpreted as a simple little-endian unsigned value. Symbol can\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex 981592f7..2534c175 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -594,12 +594,12 @@ ngbe_vlan_tpid_set(struct rte_eth_dev *dev,\n {\n \tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n \tint ret = 0;\n-\tuint32_t portctrl, vlan_ext, qinq;\n+\tuint32_t portctl, vlan_ext, qinq;\n \n-\tportctrl = rd32(hw, NGBE_PORTCTL);\n+\tportctl = rd32(hw, NGBE_PORTCTL);\n \n-\tvlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);\n-\tqinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);\n+\tvlan_ext = (portctl & NGBE_PORTCTL_VLANEXT);\n+\tqinq = vlan_ext && (portctl & NGBE_PORTCTL_QINQ);\n \tswitch (vlan_type) {\n \tcase RTE_ETH_VLAN_TYPE_INNER:\n \t\tif (vlan_ext) {\n@@ -983,7 +983,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n-\t/* confiugre MSI-X for sleep until Rx interrupt */\n+\t/* configure MSI-X for sleep until Rx interrupt */\n \tngbe_configure_msix(dev);\n \n \t/* initialize transmission unit */\n@@ -2641,7 +2641,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,\n \t\twr32(hw, NGBE_IVARMISC, tmp);\n \t} else {\n \t\t/* rx or tx causes */\n-\t\t/* Workround for ICR lost */\n+\t\t/* Workaround for ICR lost */\n \t\tidx = ((16 * (queue & 1)) + (8 * direction));\n \t\ttmp = rd32(hw, NGBE_IVAR(queue >> 1));\n \t\ttmp &= ~(0xFF << idx);\n@@ -2893,7 +2893,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev)\n \t/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \twr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);\n \n-\t/* Stop incrementating the System Time registers. */\n+\t/* Stop incrementing the System Time registers. 
*/\n \twr32(hw, NGBE_TSTIMEINC, 0);\n \n \treturn 0;\ndiff --git a/drivers/net/ngbe/ngbe_pf.c b/drivers/net/ngbe/ngbe_pf.c\nindex 7f9c04fb..12a18de3 100644\n--- a/drivers/net/ngbe/ngbe_pf.c\n+++ b/drivers/net/ngbe/ngbe_pf.c\n@@ -163,7 +163,7 @@ int ngbe_pf_host_configure(struct rte_eth_dev *eth_dev)\n \n \twr32(hw, NGBE_PSRCTL, NGBE_PSRCTL_LBENA);\n \n-\t/* clear VMDq map to perment rar 0 */\n+\t/* clear VMDq map to permanent rar 0 */\n \thw->mac.clear_vmdq(hw, 0, BIT_MASK32);\n \n \t/* clear VMDq map to scan rar 31 */\ndiff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c\nindex 4f1e368c..b47472eb 100644\n--- a/drivers/net/octeontx/octeontx_ethdev.c\n+++ b/drivers/net/octeontx/octeontx_ethdev.c\n@@ -1090,7 +1090,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \n \t/* Verify queue index */\n \tif (qidx >= dev->data->nb_rx_queues) {\n-\t\tocteontx_log_err(\"QID %d not supporteded (0 - %d available)\\n\",\n+\t\tocteontx_log_err(\"QID %d not supported (0 - %d available)\\n\",\n \t\t\t\tqidx, (dev->data->nb_rx_queues - 1));\n \t\treturn -ENOTSUP;\n \t}\ndiff --git a/drivers/net/octeontx2/otx2_ethdev_irq.c b/drivers/net/octeontx2/otx2_ethdev_irq.c\nindex cc573bb2..f56d5b2a 100644\n--- a/drivers/net/octeontx2/otx2_ethdev_irq.c\n+++ b/drivers/net/octeontx2/otx2_ethdev_irq.c\n@@ -369,7 +369,7 @@ oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)\n \t\t\t\t \"rc=%d\", rc);\n \t\t\treturn rc;\n \t\t}\n-\t\t/* VFIO vector zero is resereved for misc interrupt so\n+\t\t/* VFIO vector zero is reserved for misc interrupt so\n \t\t * doing required adjustment. (b13bfab4cd)\n \t\t */\n \t\tif (rte_intr_vec_list_index_set(handle, q,\ndiff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c\nindex abb21305..974018f9 100644\n--- a/drivers/net/octeontx2/otx2_ptp.c\n+++ b/drivers/net/octeontx2/otx2_ptp.c\n@@ -440,7 +440,7 @@ otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)\n \t/* This API returns the raw PTP HI clock value. Since LFs doesn't\n \t * have direct access to PTP registers and it requires mbox msg\n \t * to AF for this value. 
In fastpath reading this value for every\n-\t * packet (which involes mbox call) becomes very expensive, hence\n+\t * packet (which involves mbox call) becomes very expensive, hence\n \t * we should be able to derive PTP HI clock value from tsc by\n \t * using freq_mult and clk_delta calculated during configure stage.\n \t */\ndiff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h\nindex 4bbd5a39..a2fb7ce3 100644\n--- a/drivers/net/octeontx2/otx2_tx.h\n+++ b/drivers/net/octeontx2/otx2_tx.h\n@@ -61,7 +61,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd,  const uint64_t *send_mem_desc,\n \t\t\t/* Retrieving the default desc values */\n \t\t\tcmd[off] = send_mem_desc[6];\n \n-\t\t\t/* Using compiler barier to avoid voilation of C\n+\t\t\t/* Using compiler barrier to avoid violation of C\n \t\t\t * aliasing rules.\n \t\t\t */\n \t\t\trte_compiler_barrier();\n@@ -70,7 +70,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd,  const uint64_t *send_mem_desc,\n \t\t/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp\n \t\t * should not be recorded, hence changing the alg type to\n \t\t * NIX_SENDMEMALG_SET and also changing send mem addr field to\n-\t\t * next 8 bytes as it corrpt the actual tx tstamp registered\n+\t\t * next 8 bytes as it corrupts the actual tx tstamp registered\n \t\t * address.\n \t\t */\n \t\tsend_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);\ndiff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c\nindex cce643b7..359680de 100644\n--- a/drivers/net/octeontx2/otx2_vlan.c\n+++ b/drivers/net/octeontx2/otx2_vlan.c\n@@ -953,7 +953,7 @@ static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev)\n \tstruct vlan_entry *entry;\n \tint rc;\n \n-\t/* VLAN filters can't be set without setting filtern on */\n+\t/* VLAN filters can't be set without setting filters on */\n \trc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);\n \tif (rc) {\n \t\totx2_err(\"Failed to reinstall vlan filters\");\ndiff --git a/drivers/net/octeontx_ep/otx2_ep_vf.c b/drivers/net/octeontx_ep/otx2_ep_vf.c\nindex 0716beb9..94e510ef 100644\n--- a/drivers/net/octeontx_ep/otx2_ep_vf.c\n+++ b/drivers/net/octeontx_ep/otx2_ep_vf.c\n@@ -104,7 +104,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)\n \tiq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +\n \t\t\t   SDP_VF_R_IN_CNTS(iq_no);\n \n-\totx_ep_dbg(\"InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\",\n+\totx_ep_dbg(\"InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\",\n \t\t   iq_no, iq->doorbell_reg, iq->inst_cnt_reg);\n \n \tdo {\ndiff --git a/drivers/net/octeontx_ep/otx_ep_vf.c b/drivers/net/octeontx_ep/otx_ep_vf.c\nindex c9b91fef..ad7b1ea9 100644\n--- a/drivers/net/octeontx_ep/otx_ep_vf.c\n+++ b/drivers/net/octeontx_ep/otx_ep_vf.c\n@@ -117,7 +117,7 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)\n \tiq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +\n \t\t\t   OTX_EP_R_IN_CNTS(iq_no);\n \n-\totx_ep_dbg(\"InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\\n\",\n+\totx_ep_dbg(\"InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\\n\",\n \t\t     iq_no, iq->doorbell_reg, iq->inst_cnt_reg);\n \n \tdo {\ndiff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c\nindex 047010e1..41159d6e 100644\n--- a/drivers/net/pfe/pfe_ethdev.c\n+++ b/drivers/net/pfe/pfe_ethdev.c\n@@ -769,7 +769,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)\n \tif (eth_dev == NULL)\n \t\treturn -ENOMEM;\n \n-\t/* Extract pltform data 
*/\n+\t/* Extract platform data */\n \tpfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;\n \tif (!pfe_info) {\n \t\tPFE_PMD_ERR(\"pfe missing additional platform data\");\n@@ -845,7 +845,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)\n }\n \n static int\n-pfe_get_gemac_if_proprties(struct pfe *pfe,\n+pfe_get_gemac_if_properties(struct pfe *pfe,\n \t\t__rte_unused const struct device_node *parent,\n \t\tunsigned int port, unsigned int if_cnt,\n \t\tstruct ls1012a_pfe_platform_data *pdata)\n@@ -1053,7 +1053,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)\n \tg_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;\n \n \tfor (ii = 0; ii < interface_count; ii++) {\n-\t\tpfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count,\n+\t\tpfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,\n \t\t\t\t\t   &g_pfe->platform_data);\n \t}\n \ndiff --git a/drivers/net/pfe/pfe_hal.c b/drivers/net/pfe/pfe_hal.c\nindex 41d783db..934dd122 100644\n--- a/drivers/net/pfe/pfe_hal.c\n+++ b/drivers/net/pfe/pfe_hal.c\n@@ -187,7 +187,7 @@ gemac_set_mode(void *base, __rte_unused int mode)\n {\n \tu32 val = readl(base + EMAC_RCNTRL_REG);\n \n-\t/*Remove loopbank*/\n+\t/*Remove loopback*/\n \tval &= ~EMAC_RCNTRL_LOOP;\n \n \t/*Enable flow control and MII mode*/\ndiff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c\nindex c4a7154b..69b1d0ed 100644\n--- a/drivers/net/pfe/pfe_hif.c\n+++ b/drivers/net/pfe/pfe_hif.c\n@@ -114,9 +114,9 @@ pfe_hif_init_buffers(struct pfe_hif *hif)\n \t\t * results, eth id, queue id from PFE block along with data.\n \t\t * so we have to provide additional memory for each packet to\n \t\t * HIF rx rings so that PFE block can write its headers.\n-\t\t * so, we are giving the data pointor to HIF rings whose\n+\t\t * so, we are giving the data pointer to HIF rings whose\n \t\t * calculation is as below:\n-\t\t * mbuf->data_pointor - Required_header_size\n+\t\t * mbuf->data_pointer - Required_header_size\n \t\t *\n \t\t * We are utilizing the HEADROOM area to receive the PFE\n \t\t * block headers. 
On packet reception, HIF driver will use\ndiff --git a/drivers/net/pfe/pfe_hif.h b/drivers/net/pfe/pfe_hif.h\nindex 6aaf904b..e8d5ba10 100644\n--- a/drivers/net/pfe/pfe_hif.h\n+++ b/drivers/net/pfe/pfe_hif.h\n@@ -8,7 +8,7 @@\n #define HIF_CLIENT_QUEUES_MAX\t16\n #define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE\n /*\n- * HIF_TX_DESC_NT value should be always greter than 4,\n+ * HIF_TX_DESC_NT value should be always greater than 4,\n  * Otherwise HIF_TX_POLL_MARK will become zero.\n  */\n #define HIF_RX_DESC_NT\t\t64\ndiff --git a/drivers/net/pfe/pfe_hif_lib.c b/drivers/net/pfe/pfe_hif_lib.c\nindex 799050dc..6fe6d33d 100644\n--- a/drivers/net/pfe/pfe_hif_lib.c\n+++ b/drivers/net/pfe/pfe_hif_lib.c\n@@ -38,7 +38,7 @@ pfe_hif_shm_clean(struct hif_shm *hif_shm)\n  * This function should be called before initializing HIF driver.\n  *\n  * @param[in] hif_shm\t\tShared memory address location in DDR\n- * @rerurn\t\t\t0 - on succes, <0 on fail to initialize\n+ * @return\t\t\t0 - on success, <0 on fail to initialize\n  */\n int\n pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)\n@@ -109,9 +109,9 @@ hif_lib_client_release_rx_buffers(struct hif_client_s *client)\n \t\tfor (ii = 0; ii < client->rx_q[qno].size; ii++) {\n \t\t\tbuf = (void *)desc->data;\n \t\t\tif (buf) {\n-\t\t\t/* Data pointor to mbuf pointor calculation:\n+\t\t\t/* Data pointer to mbuf pointer calculation:\n \t\t\t * \"Data - User private data - headroom - mbufsize\"\n-\t\t\t * Actual data pointor given to HIF BDs was\n+\t\t\t * Actual data pointer given to HIF BDs was\n \t\t\t * \"mbuf->data_offset - PFE_PKT_HEADER_SZ\"\n \t\t\t */\n \t\t\t\tbuf = buf + PFE_PKT_HEADER_SZ\n@@ -477,7 +477,7 @@ hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int\n \t      client_id, unsigned int qno,\n \t      u32 client_ctrl)\n {\n-\t/* Optimize the write since the destinaton may be non-cacheable */\n+\t/* Optimize the write since the destination may be non-cacheable */\n \tif (!((unsigned long)pkt_hdr & 0x3)) {\n \t\t((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |\n \t\t\t\t\tclient_id;\ndiff --git a/drivers/net/qede/qede_debug.c b/drivers/net/qede/qede_debug.c\nindex 2297d245..9a2f05ac 100644\n--- a/drivers/net/qede/qede_debug.c\n+++ b/drivers/net/qede/qede_debug.c\n@@ -457,7 +457,7 @@ struct split_type_defs {\n \t(MCP_REG_SCRATCH + \\\n \t offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))\n \n-#define MAX_SW_PLTAFORM_STR_SIZE\t64\n+#define MAX_SW_PLATFORM_STR_SIZE\t64\n \n #define EMPTY_FW_VERSION_STR\t\t\"???_???_???_???\"\n #define EMPTY_FW_IMAGE_STR\t\t\"???????????????\"\n@@ -1227,13 +1227,13 @@ static u32 qed_dump_common_global_params(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t u8 num_specific_global_params)\n {\n \tstruct dbg_tools_data *dev_data = &p_hwfn->dbg_info;\n-\tchar sw_platform_str[MAX_SW_PLTAFORM_STR_SIZE];\n+\tchar sw_platform_str[MAX_SW_PLATFORM_STR_SIZE];\n \tu32 offset = 0;\n \tu8 num_params;\n \n \t/* Fill platform string */\n \tecore_set_platform_str(p_hwfn, sw_platform_str,\n-\t\t\t       MAX_SW_PLTAFORM_STR_SIZE);\n+\t\t\t       MAX_SW_PLATFORM_STR_SIZE);\n \n \t/* Dump global params section header */\n \tnum_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +\n@@ -5983,7 +5983,7 @@ static char *qed_get_buf_ptr(void *buf, u32 offset)\n /* Reads a param from the specified buffer. 
Returns the number of dwords read.\n  * If the returned str_param is NULL, the param is numeric and its value is\n  * returned in num_param.\n- * Otheriwise, the param is a string and its pointer is returned in str_param.\n+ * Otherwise, the param is a string and its pointer is returned in str_param.\n  */\n static u32 qed_read_param(u32 *dump_buf,\n \t\t\t  const char **param_name,\n@@ -7441,11 +7441,11 @@ qed_print_idle_chk_results_wrapper(struct ecore_hwfn *p_hwfn,\n \t\t\t\t   u32 num_dumped_dwords,\n \t\t\t\t   char *results_buf)\n {\n-\tu32 num_errors, num_warnnings;\n+\tu32 num_errors, num_warnings;\n \n \treturn qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,\n \t\t\t\t\t  results_buf, &num_errors,\n-\t\t\t\t\t  &num_warnnings);\n+\t\t\t\t\t  &num_warnings);\n }\n \n /* Feature meta data lookup table */\n@@ -7558,7 +7558,7 @@ static enum dbg_status format_feature(struct ecore_hwfn *p_hwfn,\n \t\ttext_buf[i] = '\\n';\n \n \n-\t/* Free the old dump_buf and point the dump_buf to the newly allocagted\n+\t/* Free the old dump_buf and point the dump_buf to the newly allocated\n \t * and formatted text buffer.\n \t */\n \tOSAL_VFREE(p_hwfn, feature->dump_buf);\ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex 3e9aaeec..a1122a29 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -2338,7 +2338,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t\tif (fp->rxq != NULL) {\n \t\t\tbufsz = (uint16_t)rte_pktmbuf_data_room_size(\n \t\t\t\tfp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;\n-\t\t\t/* cache align the mbuf size to simplfy rx_buf_size\n+\t\t\t/* cache align the mbuf size to simplify rx_buf_size\n \t\t\t * calculation\n \t\t\t */\n \t\t\tbufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nindex c0eeea89..7088c57b 100644\n--- a/drivers/net/qede/qede_rxtx.c\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -90,7 +90,7 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)\n  *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT\n  * 3) In regular mode - minimum rx_buf_size should be\n  *    (MTU + Maximum L2 Header Size + 2)\n- *    In above cases +2 corrosponds to 2 bytes padding in front of L2\n+ *    In above cases +2 corresponds to 2 bytes padding in front of L2\n  *    header.\n  * 4) rx_buf_size should be cacheline-size aligned. So considering\n  *    criteria 1, we need to adjust the size to floor instead of ceil,\n@@ -106,7 +106,7 @@ qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,\n \n \tif (dev->data->scattered_rx) {\n \t\t/* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of\n-\t\t * bufferes can be used for single packet. So need to make sure\n+\t\t * buffers can be used for single packet. 
So need to make sure\n \t\t * mbuf size is sufficient enough for this.\n \t\t */\n \t\tif ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <\n@@ -247,7 +247,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,\n \n \t/* Fix up RX buffer size */\n \tbufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;\n-\t/* cache align the mbuf size to simplfy rx_buf_size calculation */\n+\t/* cache align the mbuf size to simplify rx_buf_size calculation */\n \tbufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);\n \tif ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)\t||\n \t    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {\n@@ -1745,7 +1745,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t}\n \t}\n \n-\t/* Request number of bufferes to be allocated in next loop */\n+\t/* Request number of buffers to be allocated in next loop */\n \trxq->rx_alloc_count = rx_alloc_count;\n \n \trxq->rcv_pkts += rx_pkt;\n@@ -2042,7 +2042,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t}\n \t}\n \n-\t/* Request number of bufferes to be allocated in next loop */\n+\t/* Request number of buffers to be allocated in next loop */\n \trxq->rx_alloc_count = rx_alloc_count;\n \n \trxq->rcv_pkts += rx_pkt;\n@@ -2506,7 +2506,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t\t/* Inner L2 header size in two byte words */\n \t\t\t\tinner_l2_hdr_size = (mbuf->l2_len -\n \t\t\t\t\t\tMPLSINUDP_HDR_SIZE) / 2;\n-\t\t\t\t/* Inner L4 header offset from the beggining\n+\t\t\t\t/* Inner L4 header offset from the beginning\n \t\t\t\t * of inner packet in two byte words\n \t\t\t\t */\n \t\t\t\tinner_l4_hdr_offset = (mbuf->l2_len -\ndiff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h\nindex 754efe79..11ed1d9b 100644\n--- a/drivers/net/qede/qede_rxtx.h\n+++ b/drivers/net/qede/qede_rxtx.h\n@@ -225,7 +225,7 @@ struct qede_fastpath {\n \tstruct qede_tx_queue *txq;\n };\n \n-/* This structure holds the inforation of fast path queues\n+/* This structure holds the information of fast path queues\n  * belonging to individual engines in CMT mode.\n  */\n struct qede_fastpath_cmt {\ndiff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c\nindex ed714fe0..2cead4e0 100644\n--- a/drivers/net/sfc/sfc.c\n+++ b/drivers/net/sfc/sfc.c\n@@ -371,7 +371,7 @@ sfc_set_drv_limits(struct sfc_adapter *sa)\n \n \t/*\n \t * Limits are strict since take into account initial estimation.\n-\t * Resource allocation stategy is described in\n+\t * Resource allocation strategy is described in\n \t * sfc_estimate_resource_limits().\n \t */\n \tlim.edl_min_evq_count = lim.edl_max_evq_count =\ndiff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c\nindex d4cd1625..da2d1603 100644\n--- a/drivers/net/sfc/sfc_dp.c\n+++ b/drivers/net/sfc/sfc_dp.c\n@@ -68,7 +68,7 @@ sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)\n {\n \tif (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {\n \t\tSFC_GENERIC_LOG(ERR,\n-\t\t\t\"sfc %s dapapath '%s' already registered\",\n+\t\t\t\"sfc %s datapath '%s' already registered\",\n \t\t\tentry->type == SFC_DP_RX ? \"Rx\" :\n \t\t\tentry->type == SFC_DP_TX ? 
\"Tx\" :\n \t\t\t\"unknown\",\ndiff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h\nindex 760540ba..246adbd8 100644\n--- a/drivers/net/sfc/sfc_dp_rx.h\n+++ b/drivers/net/sfc/sfc_dp_rx.h\n@@ -158,7 +158,7 @@ typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,\n \t\t\t\t  struct sfc_dp_rxq **dp_rxqp);\n \n /**\n- * Free resources allocated for datapath recevie queue.\n+ * Free resources allocated for datapath receive queue.\n  */\n typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);\n \n@@ -191,7 +191,7 @@ typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,\n /**\n  * Receive queue purge function called after queue flush.\n  *\n- * Should be used to free unused recevie buffers.\n+ * Should be used to free unused receive buffers.\n  */\n typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);\n \ndiff --git a/drivers/net/sfc/sfc_ef100.h b/drivers/net/sfc/sfc_ef100.h\nindex 5e2052d1..e81847e7 100644\n--- a/drivers/net/sfc/sfc_ef100.h\n+++ b/drivers/net/sfc/sfc_ef100.h\n@@ -19,7 +19,7 @@ extern \"C\" {\n  *\n  * @param evq_prime\tGlobal address of the prime register\n  * @param evq_hw_index\tEvent queue index\n- * @param evq_read_ptr\tMasked event qeueu read pointer\n+ * @param evq_read_ptr\tMasked event queue read pointer\n  */\n static inline void\n sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index,\ndiff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c\nindex 5d16bf28..45253ed7 100644\n--- a/drivers/net/sfc/sfc_ef100_rx.c\n+++ b/drivers/net/sfc/sfc_ef100_rx.c\n@@ -851,7 +851,7 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,\n \tunsup_rx_prefix_fields =\n \t\tefx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);\n \n-\t/* LENGTH and CLASS filds must always be present */\n+\t/* LENGTH and CLASS fields must always be present */\n \tif ((unsup_rx_prefix_fields &\n \t     ((1U << EFX_RX_PREFIX_FIELD_LENGTH) |\n \t      (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)\ndiff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c\nindex 712c2076..78bd4303 100644\n--- a/drivers/net/sfc/sfc_ef10_essb_rx.c\n+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c\n@@ -630,7 +630,7 @@ sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,\n \t\t\t      rxq->block_size, rxq->buf_stride);\n \tsfc_ef10_essb_rx_info(&rxq->dp.dpq,\n \t\t\t      \"max fill level is %u descs (%u bufs), \"\n-\t\t\t      \"refill threashold %u descs (%u bufs)\",\n+\t\t\t      \"refill threshold %u descs (%u bufs)\",\n \t\t\t      rxq->max_fill_level,\n \t\t\t      rxq->max_fill_level * rxq->block_size,\n \t\t\t      rxq->refill_threshold,\ndiff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h\nindex 821e2227..412254e3 100644\n--- a/drivers/net/sfc/sfc_ef10_rx_ev.h\n+++ b/drivers/net/sfc/sfc_ef10_rx_ev.h\n@@ -40,7 +40,7 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,\n \t\trte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |\n \t\t\t\t (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |\n \t\t\t\t (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {\n-\t\t/* Zero packet type is used as a marker to dicard bad packets */\n+\t\t/* Zero packet type is used as a marker to discard bad packets */\n \t\tgoto done;\n \t}\n \ndiff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c\nindex ab67aa92..ddddefad 100644\n--- a/drivers/net/sfc/sfc_intr.c\n+++ b/drivers/net/sfc/sfc_intr.c\n@@ -8,7 +8,7 @@\n  */\n \n /*\n- * At the momemt of writing 
DPDK v16.07 has notion of two types of\n+ * At the moment of writing DPDK v16.07 has notion of two types of\n  * interrupts: LSC (link status change) and RXQ (receive indication).\n  * It allows to register interrupt callback for entire device which is\n  * not intended to be used for receive indication (i.e. link status\ndiff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c\nindex b34c9afd..9127c903 100644\n--- a/drivers/net/sfc/sfc_mae.c\n+++ b/drivers/net/sfc/sfc_mae.c\n@@ -1805,7 +1805,7 @@ struct sfc_mae_field_locator {\n \tefx_mae_field_id_t\t\tfield_id;\n \tsize_t\t\t\t\tsize;\n \t/* Field offset in the corresponding rte_flow_item_ struct */\n-\tsize_t\t\t\t\tofst;\n+\tsize_t\t\t\t\toffset;\n };\n \n static void\n@@ -1820,8 +1820,8 @@ sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,\n \tfor (i = 0; i < nb_field_locators; ++i) {\n \t\tconst struct sfc_mae_field_locator *fl = &field_locators[i];\n \n-\t\tSFC_ASSERT(fl->ofst + fl->size <= mask_size);\n-\t\tmemset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);\n+\t\tSFC_ASSERT(fl->offset + fl->size <= mask_size);\n+\t\tmemset(RTE_PTR_ADD(mask_ptr, fl->offset), 0xff, fl->size);\n \t}\n }\n \n@@ -1843,8 +1843,8 @@ sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,\n \n \t\trc = efx_mae_match_spec_field_set(ctx->match_spec,\n \t\t\t\t\t\t  fremap[fl->field_id],\n-\t\t\t\t\t\t  fl->size, spec + fl->ofst,\n-\t\t\t\t\t\t  fl->size, mask + fl->ofst);\n+\t\t\t\t\t\t  fl->size, spec + fl->offset,\n+\t\t\t\t\t\t  fl->size, mask + fl->offset);\n \t\tif (rc != 0)\n \t\t\tbreak;\n \t}\n@@ -2387,7 +2387,7 @@ static const struct sfc_mae_field_locator flocs_tunnel[] = {\n \t\t * for Geneve and NVGRE, too.\n \t\t */\n \t\t.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),\n-\t\t.ofst = offsetof(struct rte_flow_item_vxlan, vni),\n+\t\t.offset = offsetof(struct rte_flow_item_vxlan, vni),\n \t},\n };\n \n@@ -3297,7 +3297,7 @@ sfc_mae_rule_parse_action_of_set_vlan_pcp(\n \n struct sfc_mae_parsed_item {\n \tconst struct rte_flow_item\t*item;\n-\tsize_t\t\t\t\tproto_header_ofst;\n+\tsize_t\t\t\t\tproto_header_offset;\n \tsize_t\t\t\t\tproto_header_size;\n };\n \n@@ -3316,20 +3316,20 @@ sfc_mae_header_force_item_masks(uint8_t *header_buf,\n \t\tconst struct sfc_mae_parsed_item *parsed_item;\n \t\tconst struct rte_flow_item *item;\n \t\tsize_t proto_header_size;\n-\t\tsize_t ofst;\n+\t\tsize_t offset;\n \n \t\tparsed_item = &parsed_items[item_idx];\n \t\tproto_header_size = parsed_item->proto_header_size;\n \t\titem = parsed_item->item;\n \n-\t\tfor (ofst = 0; ofst < proto_header_size;\n-\t\t     ofst += sizeof(rte_be16_t)) {\n-\t\t\trte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);\n+\t\tfor (offset = 0; offset < proto_header_size;\n+\t\t     offset += sizeof(rte_be16_t)) {\n+\t\t\trte_be16_t *wp = RTE_PTR_ADD(header_buf, offset);\n \t\t\tconst rte_be16_t *w_maskp;\n \t\t\tconst rte_be16_t *w_specp;\n \n-\t\t\tw_maskp = RTE_PTR_ADD(item->mask, ofst);\n-\t\t\tw_specp = RTE_PTR_ADD(item->spec, ofst);\n+\t\t\tw_maskp = RTE_PTR_ADD(item->mask, offset);\n+\t\t\tw_specp = RTE_PTR_ADD(item->spec, offset);\n \n \t\t\t*wp &= ~(*w_maskp);\n \t\t\t*wp |= (*w_specp & *w_maskp);\n@@ -3363,7 +3363,7 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \t\t\t\t\t\t1 /* VXLAN */];\n \tunsigned int nb_parsed_items = 0;\n \n-\tsize_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);\n+\tsize_t eth_ethertype_offset = offsetof(struct rte_ether_hdr, ether_type);\n \tuint8_t 
dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),\n \t\t\t\t  sizeof(struct rte_ipv6_hdr))];\n \tstruct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;\n@@ -3371,8 +3371,8 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \tstruct rte_vxlan_hdr *vxlan = NULL;\n \tstruct rte_udp_hdr *udp = NULL;\n \tunsigned int nb_vlan_tags = 0;\n-\tsize_t next_proto_ofst = 0;\n-\tsize_t ethertype_ofst = 0;\n+\tsize_t next_proto_offset = 0;\n+\tsize_t ethertype_offset = 0;\n \tuint64_t exp_items;\n \tint rc;\n \n@@ -3444,7 +3444,7 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \n \t\t\tproto_header_size = sizeof(struct rte_ether_hdr);\n \n-\t\t\tethertype_ofst = eth_ethertype_ofst;\n+\t\t\tethertype_offset = eth_ethertype_offset;\n \n \t\t\texp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |\n \t\t\t\t    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |\n@@ -3458,13 +3458,13 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \n \t\t\tproto_header_size = sizeof(struct rte_vlan_hdr);\n \n-\t\t\tethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);\n+\t\t\tethertypep = RTE_PTR_ADD(buf, eth_ethertype_offset);\n \t\t\t*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);\n \n-\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_ofst);\n+\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_offset);\n \t\t\t*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);\n \n-\t\t\tethertype_ofst =\n+\t\t\tethertype_offset =\n \t\t\t    bounce_eh->size +\n \t\t\t    offsetof(struct rte_vlan_hdr, eth_proto);\n \n@@ -3482,10 +3482,10 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \n \t\t\tproto_header_size = sizeof(struct rte_ipv4_hdr);\n \n-\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_ofst);\n+\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_offset);\n \t\t\t*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);\n \n-\t\t\tnext_proto_ofst =\n+\t\t\tnext_proto_offset =\n \t\t\t    bounce_eh->size +\n \t\t\t    offsetof(struct rte_ipv4_hdr, next_proto_id);\n \n@@ -3501,10 +3501,10 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \n \t\t\tproto_header_size = sizeof(struct rte_ipv6_hdr);\n \n-\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_ofst);\n+\t\t\tethertypep = RTE_PTR_ADD(buf, ethertype_offset);\n \t\t\t*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);\n \n-\t\t\tnext_proto_ofst = bounce_eh->size +\n+\t\t\tnext_proto_offset = bounce_eh->size +\n \t\t\t\t\t  offsetof(struct rte_ipv6_hdr, proto);\n \n \t\t\tipv6 = (struct rte_ipv6_hdr *)buf_cur;\n@@ -3519,7 +3519,7 @@ sfc_mae_rule_parse_action_vxlan_encap(\n \n \t\t\tproto_header_size = sizeof(struct rte_udp_hdr);\n \n-\t\t\tnext_protop = RTE_PTR_ADD(buf, next_proto_ofst);\n+\t\t\tnext_protop = RTE_PTR_ADD(buf, next_proto_offset);\n \t\t\t*next_protop = IPPROTO_UDP;\n \n \t\t\tudp = (struct rte_udp_hdr *)buf_cur;\ndiff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c\nindex 71042841..cd58d60a 100644\n--- a/drivers/net/sfc/sfc_rx.c\n+++ b/drivers/net/sfc/sfc_rx.c\n@@ -1057,7 +1057,7 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)\n \t/* Make sure that end padding does not write beyond the buffer */\n \tif (buf_aligned < nic_align_end) {\n \t\t/*\n-\t\t * Estimate space which can be lost. If guarnteed buffer\n+\t\t * Estimate space which can be lost. If guaranteed buffer\n \t\t * size is odd, lost space is (nic_align_end - 1). 
More\n \t\t * accurate formula is below.\n \t\t */\n@@ -1702,7 +1702,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)\n \n \t/*\n \t * Finalize only ethdev queues since other ones are finalized only\n-\t * on device close and they may require additional deinitializaton.\n+\t * on device close and they may require additional deinitialization.\n \t */\n \tethdev_qid = sas->ethdev_rxq_count;\n \twhile (--ethdev_qid >= (int)nb_rx_queues) {\n@@ -1775,7 +1775,7 @@ sfc_rx_configure(struct sfc_adapter *sa)\n \n \t\treconfigure = true;\n \n-\t\t/* Do not ununitialize reserved queues */\n+\t\t/* Do not uninitialize reserved queues */\n \t\tif (nb_rx_queues < sas->ethdev_rxq_count)\n \t\t\tsfc_rx_fini_queues(sa, nb_rx_queues);\n \ndiff --git a/drivers/net/sfc/sfc_tso.h b/drivers/net/sfc/sfc_tso.h\nindex 9029ad15..f2fba304 100644\n--- a/drivers/net/sfc/sfc_tso.h\n+++ b/drivers/net/sfc/sfc_tso.h\n@@ -53,21 +53,21 @@ sfc_tso_outer_udp_fix_len(const struct rte_mbuf *m, uint8_t *tsoh)\n \n static inline void\n sfc_tso_innermost_ip_fix_len(const struct rte_mbuf *m, uint8_t *tsoh,\n-\t\t\t     size_t iph_ofst)\n+\t\t\t     size_t iph_offset)\n {\n \tsize_t ip_payload_len = m->l4_len + m->tso_segsz;\n-\tsize_t field_ofst;\n+\tsize_t field_offset;\n \trte_be16_t len;\n \n \tif (m->ol_flags & RTE_MBUF_F_TX_IPV4) {\n-\t\tfield_ofst = offsetof(struct rte_ipv4_hdr, total_length);\n+\t\tfield_offset = offsetof(struct rte_ipv4_hdr, total_length);\n \t\tlen = rte_cpu_to_be_16(m->l3_len + ip_payload_len);\n \t} else {\n-\t\tfield_ofst = offsetof(struct rte_ipv6_hdr, payload_len);\n+\t\tfield_offset = offsetof(struct rte_ipv6_hdr, payload_len);\n \t\tlen = rte_cpu_to_be_16(ip_payload_len);\n \t}\n \n-\trte_memcpy(tsoh + iph_ofst + field_ofst, &len, sizeof(len));\n+\trte_memcpy(tsoh + iph_offset + field_offset, &len, sizeof(len));\n }\n \n unsigned int sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,\ndiff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c\nindex 0dccf21f..cd927cf2 100644\n--- a/drivers/net/sfc/sfc_tx.c\n+++ b/drivers/net/sfc/sfc_tx.c\n@@ -356,7 +356,7 @@ sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)\n \n \t/*\n \t * Finalize only ethdev queues since other ones are finalized only\n-\t * on device close and they may require additional deinitializaton.\n+\t * on device close and they may require additional deinitialization.\n \t */\n \tethdev_qid = sas->ethdev_txq_count;\n \twhile (--ethdev_qid >= (int)nb_tx_queues) {\ndiff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c\nindex ca70eab6..ad96288e 100644\n--- a/drivers/net/softnic/rte_eth_softnic_flow.c\n+++ b/drivers/net/softnic/rte_eth_softnic_flow.c\n@@ -930,7 +930,7 @@ flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,\n  * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*\n  * respectively.\n  * They are located within a larger buffer at offsets *toffset* and *foffset*\n- * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger\n+ * respectively. 
Both *tmask* and *fmask* represent bitmasks for the larger\n  * buffer.\n  * Question: are the two masks equivalent?\n  *\ndiff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c\nindex f1b48cae..5bb472f1 100644\n--- a/drivers/net/tap/rte_eth_tap.c\n+++ b/drivers/net/tap/rte_eth_tap.c\n@@ -525,7 +525,7 @@ tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,\n \t}\n }\n \n-/* Accumaulate L4 raw checksums */\n+/* Accumulate L4 raw checksums */\n static void\n tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,\n \t\t\tuint32_t *l4_raw_cksum)\ndiff --git a/drivers/net/tap/tap_bpf_api.c b/drivers/net/tap/tap_bpf_api.c\nindex 98f6a760..15283f89 100644\n--- a/drivers/net/tap/tap_bpf_api.c\n+++ b/drivers/net/tap/tap_bpf_api.c\n@@ -96,7 +96,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,\n  * Load BPF instructions to kernel\n  *\n  * @param[in] type\n- *   BPF program type: classifieir or action\n+ *   BPF program type: classifier or action\n  *\n  * @param[in] insns\n  *   Array of BPF instructions (equivalent to BPF instructions)\n@@ -104,7 +104,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,\n  * @param[in] insns_cnt\n  *   Number of BPF instructions (size of array)\n  *\n- * @param[in] lincense\n+ * @param[in] license\n  *   License string that must be acknowledged by the kernel\n  *\n  * @return\ndiff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c\nindex c4f60ce9..76738239 100644\n--- a/drivers/net/tap/tap_flow.c\n+++ b/drivers/net/tap/tap_flow.c\n@@ -961,7 +961,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)\n }\n \n /**\n- * Helper function to send a serie of TC actions to the kernel\n+ * Helper function to send a series of TC actions to the kernel\n  *\n  * @param[in] flow\n  *   Pointer to rte flow containing the netlink message\n@@ -2017,7 +2017,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)\n \t\t\tbreak;\n \n \t\t/*\n-\t\t * Subtract offest to restore real key index\n+\t\t * Subtract offset to restore real key index\n \t\t * If a non RSS flow is falsely trying to release map\n \t\t * entry 0 - the offset subtraction will calculate the real\n \t\t * map index as an out-of-range value and the release operation\ndiff --git a/drivers/net/thunderx/nicvf_svf.c b/drivers/net/thunderx/nicvf_svf.c\nindex bccf2905..1bcf73d9 100644\n--- a/drivers/net/thunderx/nicvf_svf.c\n+++ b/drivers/net/thunderx/nicvf_svf.c\n@@ -21,7 +21,7 @@ nicvf_svf_push(struct nicvf *vf)\n \n \tentry = rte_zmalloc(\"nicvf\", sizeof(*entry), RTE_CACHE_LINE_SIZE);\n \tif (entry == NULL)\n-\t\trte_panic(\"Cannoc allocate memory for svf_entry\\n\");\n+\t\trte_panic(\"Cannot allocate memory for svf_entry\\n\");\n \n \tentry->vf = vf;\n \ndiff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c\nindex 47d0e6ea..e617c9af 100644\n--- a/drivers/net/txgbe/txgbe_ethdev.c\n+++ b/drivers/net/txgbe/txgbe_ethdev.c\n@@ -1026,12 +1026,12 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,\n {\n \tstruct txgbe_hw *hw = TXGBE_DEV_HW(dev);\n \tint ret = 0;\n-\tuint32_t portctrl, vlan_ext, qinq;\n+\tuint32_t portctl, vlan_ext, qinq;\n \n-\tportctrl = rd32(hw, TXGBE_PORTCTL);\n+\tportctl = rd32(hw, TXGBE_PORTCTL);\n \n-\tvlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);\n-\tqinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);\n+\tvlan_ext = (portctl & TXGBE_PORTCTL_VLANEXT);\n+\tqinq = vlan_ext && (portctl & TXGBE_PORTCTL_QINQ);\n \tswitch (vlan_type) {\n 
\tcase RTE_ETH_VLAN_TYPE_INNER:\n \t\tif (vlan_ext) {\n@@ -1678,7 +1678,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)\n \t\t\treturn -ENOMEM;\n \t\t}\n \t}\n-\t/* confiugre msix for sleep until rx interrupt */\n+\t/* configure msix for sleep until rx interrupt */\n \ttxgbe_configure_msix(dev);\n \n \t/* initialize transmission unit */\n@@ -3682,7 +3682,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,\n \t\twr32(hw, TXGBE_IVARMISC, tmp);\n \t} else {\n \t\t/* rx or tx causes */\n-\t\t/* Workround for ICR lost */\n+\t\t/* Workaround for ICR lost */\n \t\tidx = ((16 * (queue & 1)) + (8 * direction));\n \t\ttmp = rd32(hw, TXGBE_IVAR(queue >> 1));\n \t\ttmp &= ~(0xFF << idx);\n@@ -4387,7 +4387,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev)\n \t/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \twr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);\n \n-\t/* Stop incrementating the System Time registers. */\n+\t/* Stop incrementing the System Time registers. */\n \twr32(hw, TXGBE_TSTIMEINC, 0);\n \n \treturn 0;\ndiff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c\nindex 84b960b8..f52cd8bc 100644\n--- a/drivers/net/txgbe/txgbe_ethdev_vf.c\n+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c\n@@ -961,7 +961,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,\n \t\twr32(hw, TXGBE_VFIVARMISC, tmp);\n \t} else {\n \t\t/* rx or tx cause */\n-\t\t/* Workround for ICR lost */\n+\t\t/* Workaround for ICR lost */\n \t\tidx = ((16 * (queue & 1)) + (8 * direction));\n \t\ttmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));\n \t\ttmp &= ~(0xFF << idx);\n@@ -997,7 +997,7 @@ txgbevf_configure_msix(struct rte_eth_dev *dev)\n \t/* Configure all RX queues of VF */\n \tfor (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {\n \t\t/* Force all queue use vector 0,\n-\t\t * as TXGBE_VF_MAXMSIVECOTR = 1\n+\t\t * as TXGBE_VF_MAXMSIVECTOR = 1\n \t\t */\n \t\ttxgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);\n \t\trte_intr_vec_list_index_set(intr_handle, q_idx,\n@@ -1288,7 +1288,7 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)\n \n \t/* only one misc vector supported - mailbox */\n \teicr &= TXGBE_VFICR_MASK;\n-\t/* Workround for ICR lost */\n+\t/* Workaround for ICR lost */\n \tintr->flags |= TXGBE_FLAG_MAILBOX;\n \n \t/* To avoid compiler warnings set eicr to used. 
*/\ndiff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c\nindex 445733f3..e2701063 100644\n--- a/drivers/net/txgbe/txgbe_ipsec.c\n+++ b/drivers/net/txgbe/txgbe_ipsec.c\n@@ -288,7 +288,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev,\n \t\t\treturn -1;\n \t\t}\n \n-\t\t/* Disable and clear Rx SPI and key table entryes*/\n+\t\t/* Disable and clear Rx SPI and key table entries*/\n \t\treg_val = TXGBE_IPSRXIDX_WRITE |\n \t\t\tTXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);\n \t\twr32(hw, TXGBE_IPSRXSPI, 0);\ndiff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c\nindex 30be2873..67d92bfa 100644\n--- a/drivers/net/txgbe/txgbe_pf.c\n+++ b/drivers/net/txgbe/txgbe_pf.c\n@@ -236,7 +236,7 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)\n \n \twr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);\n \n-\t/* clear VMDq map to perment rar 0 */\n+\t/* clear VMDq map to permanent rar 0 */\n \thw->mac.clear_vmdq(hw, 0, BIT_MASK32);\n \n \t/* clear VMDq map to scan rar 127 */\ndiff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex c2588369..b317649d 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -2657,7 +2657,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)\n \thw->has_rx_offload = rx_offload_enabled(hw);\n \n \tif (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)\n-\t\t/* Enable vector (0) for Link State Intrerrupt */\n+\t\t/* Enable vector (0) for Link State Interrupt */\n \t\tif (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==\n \t\t\t\tVIRTIO_MSI_NO_VECTOR) {\n \t\t\tPMD_DRV_LOG(ERR, \"failed to set config vector\");\n@@ -2775,7 +2775,7 @@ virtio_dev_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n-\t/* Enable uio/vfio intr/eventfd mapping: althrough we already did that\n+\t/* Enable uio/vfio intr/eventfd mapping: although we already did that\n \t * in device configure, but it could be unmapped  when device is\n \t * stopped.\n \t */\ndiff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c\nindex 182cfc9e..632451dc 100644\n--- a/drivers/net/virtio/virtio_pci.c\n+++ b/drivers/net/virtio/virtio_pci.c\n@@ -235,7 +235,7 @@ legacy_get_isr(struct virtio_hw *hw)\n \treturn dst;\n }\n \n-/* Enable one vector (0) for Link State Intrerrupt */\n+/* Enable one vector (0) for Link State Interrupt */\n static uint16_t\n legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)\n {\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex 2e115ded..b39dd92d 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -962,7 +962,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\t/* Update mss lengthes in mbuf */\n+\t\t/* Update mss lengths in mbuf */\n \t\tm->tso_segsz = hdr->gso_size;\n \t\tswitch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {\n \t\t\tcase VIRTIO_NET_HDR_GSO_TCPV4:\ndiff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h b/drivers/net/virtio/virtio_rxtx_packed_avx.h\nindex 8cb71f3f..584ac72f 100644\n--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h\n+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h\n@@ -192,7 +192,7 @@ virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,\n \n \t/*\n \t * load len from desc, store into mbuf pkt_len and data_len\n-\t * len limiated by l6bit buf_len, pkt_len[16:31] can be ignored\n+\t * len limited by 16bit buf_len, pkt_len[16:31] can be ignored\n \t */\n \tconst __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 
| 0x6 << 12;\n \t__m512i values = _mm512_maskz_shuffle_epi32(mask, v_desc, 0xAA);\ndiff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c\nindex 65bf792e..c98d696e 100644\n--- a/drivers/net/virtio/virtqueue.c\n+++ b/drivers/net/virtio/virtqueue.c\n@@ -13,7 +13,7 @@\n /*\n  * Two types of mbuf to be cleaned:\n  * 1) mbuf that has been consumed by backend but not used by virtio.\n- * 2) mbuf that hasn't been consued by backend.\n+ * 2) mbuf that hasn't been consumed by backend.\n  */\n struct rte_mbuf *\n virtqueue_detach_unused(struct virtqueue *vq)\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 855f57a9..99c68cf6 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -227,7 +227,7 @@ struct virtio_net_ctrl_rss {\n  * Control link announce acknowledgement\n  *\n  * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that\n- * driver has recevied the notification; device would clear the\n+ * driver has received the notification; device would clear the\n  * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives\n  * this command.\n  */\n@@ -312,7 +312,7 @@ struct virtqueue {\n \tstruct vq_desc_extra vq_descx[0];\n };\n \n-/* If multiqueue is provided by host, then we suppport it. */\n+/* If multiqueue is provided by host, then we support it. */\n #define VIRTIO_NET_CTRL_MQ   4\n \n #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex de26d2ae..ebc2cd5d 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -653,7 +653,7 @@ dpdmai_dev_dequeue_multijob_prefetch(\n \trte_prefetch0((void *)(size_t)(dq_storage + 1));\n \n \t/* Prepare next pull descriptor. This will give space for the\n-\t * prefething done on DQRR entries\n+\t * prefetching done on DQRR entries\n \t */\n \tq_storage->toggle ^= 1;\n \tdq_storage1 = q_storage->dq_storage[q_storage->toggle];\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\nindex d6f6bb55..1973d5d2 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n@@ -82,7 +82,7 @@ struct qdma_device {\n \t/** total number of hw queues. 
*/\n \tuint16_t num_hw_queues;\n \t/**\n-\t * Maximum number of hw queues to be alocated per core.\n+\t * Maximum number of hw queues to be allocated per core.\n \t * This is limited by MAX_HW_QUEUE_PER_CORE\n \t */\n \tuint16_t max_hw_queues_per_core;\n@@ -268,7 +268,7 @@ struct dpaa2_dpdmai_dev {\n \tstruct fsl_mc_io dpdmai;\n \t/** HW ID for DPDMAI object */\n \tuint32_t dpdmai_id;\n-\t/** Tocken of this device */\n+\t/** Token of this device */\n \tuint16_t token;\n \t/** Number of queue in this DPDMAI device */\n \tuint8_t num_queues;\ndiff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c\nindex 8d9db585..0eae0c94 100644\n--- a/drivers/raw/ifpga/ifpga_rawdev.c\n+++ b/drivers/raw/ifpga/ifpga_rawdev.c\n@@ -382,7 +382,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,\n \n \t\t\tif (HIGH_WARN(sensor, value) ||\n \t\t\t\tLOW_WARN(sensor, value)) {\n-\t\t\t\tIFPGA_RAWDEV_PMD_INFO(\"%s reach theshold %d\\n\",\n+\t\t\t\tIFPGA_RAWDEV_PMD_INFO(\"%s reach threshold %d\\n\",\n \t\t\t\t\tsensor->name, value);\n \t\t\t\t*gsd_start = true;\n \t\t\t\tbreak;\n@@ -393,7 +393,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,\n \t\tif (!strcmp(sensor->name, \"12V AUX Voltage\")) {\n \t\t\tif (value < AUX_VOLTAGE_WARN) {\n \t\t\t\tIFPGA_RAWDEV_PMD_INFO(\n-\t\t\t\t\t\"%s reach theshold %d mV\\n\",\n+\t\t\t\t\t\"%s reach threshold %d mV\\n\",\n \t\t\t\t\tsensor->name, value);\n \t\t\t\t*gsd_start = true;\n \t\t\t\tbreak;\n@@ -441,12 +441,12 @@ static int set_surprise_link_check_aer(\n \t\tpos = ifpga_pci_find_ext_capability(fd, RTE_PCI_EXT_CAP_ID_ERR);\n \t\tif (!pos)\n \t\t\tgoto end;\n-\t\t/* save previout ECAP_AER+0x08 */\n+\t\t/* save previous ECAP_AER+0x08 */\n \t\tret = pread(fd, &data, sizeof(data), pos+0x08);\n \t\tif (ret == -1)\n \t\t\tgoto end;\n \t\tifpga_rdev->aer_old[0] = data;\n-\t\t/* save previout ECAP_AER+0x14 */\n+\t\t/* save previous ECAP_AER+0x14 */\n \t\tret = pread(fd, &data, sizeof(data), pos+0x14);\n \t\tif (ret == -1)\n \t\t\tgoto end;\n@@ -531,7 +531,7 @@ ifpga_monitor_start_func(void)\n \t\t\t\t\t     ifpga_rawdev_gsd_handle, NULL);\n \t\tif (ret != 0) {\n \t\t\tIFPGA_RAWDEV_PMD_ERR(\n-\t\t\t\t\"Fail to create ifpga nonitor thread\");\n+\t\t\t\t\"Fail to create ifpga monitor thread\");\n \t\t\treturn -1;\n \t\t}\n \t\tifpga_monitor_start = 1;\ndiff --git a/drivers/raw/ioat/ioat_rawdev.c b/drivers/raw/ioat/ioat_rawdev.c\nindex 5396671d..d4dcb233 100644\n--- a/drivers/raw/ioat/ioat_rawdev.c\n+++ b/drivers/raw/ioat/ioat_rawdev.c\n@@ -200,7 +200,7 @@ ioat_rawdev_create(const char *name, struct rte_pci_device *dev)\n \tioat->rawdev = rawdev;\n \tioat->mz = mz;\n \tioat->regs = dev->mem_resource[0].addr;\n-\tioat->doorbell = &ioat->regs->dmacount;\n+\tioat->doorbell = &ioat->regs->dmaccount;\n \tioat->ring_size = 0;\n \tioat->desc_ring = NULL;\n \tioat->status_addr = ioat->mz->iova +\ndiff --git a/drivers/raw/ioat/ioat_spec.h b/drivers/raw/ioat/ioat_spec.h\nindex 6aa467e4..51c4b3f8 100644\n--- a/drivers/raw/ioat/ioat_spec.h\n+++ b/drivers/raw/ioat/ioat_spec.h\n@@ -60,7 +60,7 @@ struct rte_ioat_registers {\n \tuint8_t\t\treserved6[0x2];\t/* 0x82 */\n \tuint8_t\t\tchancmd;\t/* 0x84 */\n \tuint8_t\t\treserved3[1];\t/* 0x85 */\n-\tuint16_t\tdmacount;\t/* 0x86 */\n+\tuint16_t\tdmaccount;\t/* 0x86 */\n \tuint64_t\tchansts;\t/* 0x88 */\n \tuint64_t\tchainaddr;\t/* 0x90 */\n \tuint64_t\tchancmp;\t/* 0x98 */\ndiff --git a/drivers/raw/ntb/ntb.h b/drivers/raw/ntb/ntb.h\nindex cdf7667d..c9ff33aa 100644\n--- a/drivers/raw/ntb/ntb.h\n+++ b/drivers/raw/ntb/ntb.h\n@@ 
-95,7 +95,7 @@ enum ntb_spad_idx {\n  * @spad_write: Write val to local/peer spad register.\n  * @db_read: Read doorbells status.\n  * @db_clear: Clear local doorbells.\n- * @db_set_mask: Set bits in db mask, preventing db interrpts generated\n+ * @db_set_mask: Set bits in db mask, preventing db interrupts generated\n  * for those db bits.\n  * @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.\n  * @vector_bind: Bind vector source [intr] to msix vector [msix].\ndiff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c\nindex 9a2db7e4..72464cad 100644\n--- a/drivers/regex/mlx5/mlx5_regex_fastpath.c\n+++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c\n@@ -226,7 +226,7 @@ complete_umr_wqe(struct mlx5_regex_qp *qp, struct mlx5_regex_hw_qp *qp_obj,\n \t\t\t rte_cpu_to_be_32(mkey_job->imkey->id));\n \t/* Set UMR WQE control seg. */\n \tucseg->mkey_mask |= rte_cpu_to_be_64(MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN |\n-\t\t\t\tMLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET |\n+\t\t\t\tMLX5_WQE_UMR_CTRL_FLAG_TRANSLATION_OFFSET |\n \t\t\t\tMLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE);\n \tucseg->klm_octowords = rte_cpu_to_be_16(klm_align);\n \t/* Set mkey context seg. */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\nindex b1b9053b..130d201a 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n@@ -160,7 +160,7 @@ mlx5_vdpa_vhost_mem_regions_prepare(int vid, uint8_t *mode, uint64_t *mem_size,\n  * The target here is to group all the physical memory regions of the\n  * virtio device in one indirect mkey.\n  * For KLM Fixed Buffer Size mode (HW find the translation entry in one\n- * read according to the guest phisical address):\n+ * read according to the guest physical address):\n  * All the sub-direct mkeys of it must be in the same size, hence, each\n  * one of them should be in the GCD size of all the virtio memory\n  * regions and the holes between them.\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex db971bad..2f32aef6 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -403,7 +403,7 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)\n \tif (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {\n \t\tif (!(priv->caps.virtio_queue_type & (1 <<\n \t\t\t\t\t\t     MLX5_VIRTQ_TYPE_PACKED))) {\n-\t\t\tDRV_LOG(ERR, \"Failed to configur PACKED mode for vdev \"\n+\t\t\tDRV_LOG(ERR, \"Failed to configure PACKED mode for vdev \"\n \t\t\t\t\"%d - it was not reported by HW/driver\"\n \t\t\t\t\" capability.\", priv->vid);\n \t\t\treturn -ENOTSUP;\ndiff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c\nindex ecafc5e4..fc7e8b81 100644\n--- a/examples/bbdev_app/main.c\n+++ b/examples/bbdev_app/main.c\n@@ -372,7 +372,7 @@ add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts)\n /* Encoder output to Decoder input adapter. The Decoder accepts only soft input\n  * so each bit of the encoder output must be translated into one byte of LLR. 
If\n  * Sub-block Deinterleaver is bypassed, which is the case, the padding bytes\n- * must additionally be insterted at the end of each sub-block.\n+ * must additionally be inserted at the end of each sub-block.\n  */\n static inline void\n transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,\ndiff --git a/examples/bond/main.c b/examples/bond/main.c\nindex 1087b0da..335bde5c 100644\n--- a/examples/bond/main.c\n+++ b/examples/bond/main.c\n@@ -230,7 +230,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)\n \t\t\t0 /*SOCKET_ID_ANY*/);\n \tif (retval < 0)\n \t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\"Faled to create bond port\\n\");\n+\t\t\t\t\"Failed to create bond port\\n\");\n \n \tBOND_PORT = retval;\n \n@@ -405,7 +405,7 @@ static int lcore_main(__rte_unused void *arg1)\n \t\t\t\t\t\tstruct rte_ether_hdr *);\n \t\t\tether_type = eth_hdr->ether_type;\n \t\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))\n-\t\t\t\tprintf(\"VLAN taged frame, offset:\");\n+\t\t\t\tprintf(\"VLAN tagged frame, offset:\");\n \t\t\toffset = get_vlan_offset(eth_hdr, &ether_type);\n \t\t\tif (offset > 0)\n \t\t\t\tprintf(\"%d\\n\", offset);\ndiff --git a/examples/dma/dmafwd.c b/examples/dma/dmafwd.c\nindex d074acc9..608487e3 100644\n--- a/examples/dma/dmafwd.c\n+++ b/examples/dma/dmafwd.c\n@@ -87,7 +87,7 @@ static uint16_t nb_queues = 1;\n /* MAC updating enabled by default. */\n static int mac_updating = 1;\n \n-/* hardare copy mode enabled by default. */\n+/* hardware copy mode enabled by default. */\n static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;\n \n /* size of descriptor ring for hardware copy mode or\ndiff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c\nindex 86286d38..ffaad964 100644\n--- a/examples/ethtool/lib/rte_ethtool.c\n+++ b/examples/ethtool/lib/rte_ethtool.c\n@@ -402,7 +402,7 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)\n #endif\n \t}\n \n-\t/* Enable Rx vlan filter, VF unspport status is discard */\n+\t/* Enable Rx vlan filter, VF unsupported status is discard */\n \tret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);\n \tif (ret != 0)\n \t\treturn ret;\ndiff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h\nindex f1770966..d27e0102 100644\n--- a/examples/ethtool/lib/rte_ethtool.h\n+++ b/examples/ethtool/lib/rte_ethtool.h\n@@ -189,7 +189,7 @@ int rte_ethtool_get_module_eeprom(uint16_t port_id,\n \n /**\n  * Retrieve the Ethernet device pause frame configuration according to\n- * parameter attributes desribed by ethtool data structure,\n+ * parameter attributes described by ethtool data structure,\n  * ethtool_pauseparam.\n  *\n  * @param port_id\n@@ -209,7 +209,7 @@ int rte_ethtool_get_pauseparam(uint16_t port_id,\n \n /**\n  * Setting the Ethernet device pause frame configuration according to\n- * parameter attributes desribed by ethtool data structure, ethtool_pauseparam.\n+ * parameter attributes described by ethtool data structure, ethtool_pauseparam.\n  *\n  * @param port_id\n  *   The port identifier of the Ethernet device.\ndiff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c\nindex fb3cac3b..1023bf6b 100644\n--- a/examples/ip_reassembly/main.c\n+++ b/examples/ip_reassembly/main.c\n@@ -244,7 +244,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];\n #endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */\n \n /*\n- * If number of queued packets reached given threahold, then\n+ * If number of queued packets reached given threshold, then\n  * send burst of packets on 
an output interface.\n */\n static inline uint32_t\n@@ -877,7 +877,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)\n \t * Plus, each TX queue can hold up to <max_flow_num> packets.\n \t */\n \n-\t/* mbufs stored int the gragment table. 8< */\n+\t/* mbufs stored in the fragment table. 8< */\n \tnb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;\n \tnb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN\n \t\t\t+ BUF_SIZE - 1) / BUF_SIZE;\ndiff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c\nindex e8600f5e..24b210ad 100644\n--- a/examples/ipsec-secgw/event_helper.c\n+++ b/examples/ipsec-secgw/event_helper.c\n@@ -1353,7 +1353,7 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)\n \tfor (i = 0; i < nb_rx_adapter; i++) {\n \t\tadapter = &(em_conf->rx_adapter[i]);\n \t\tsprintf(print_buf,\n-\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\",\n+\t\t\t\"\\tRx adapter ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\",\n \t\t\tadapter->adapter_id,\n \t\t\tadapter->nb_connections,\n \t\t\tadapter->eventdev_id);\ndiff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c\nindex bf3dbf6b..96916cd3 100644\n--- a/examples/ipsec-secgw/ipsec-secgw.c\n+++ b/examples/ipsec-secgw/ipsec-secgw.c\n@@ -265,7 +265,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];\n /*\n  * Determine is multi-segment support required:\n  *  - either frame buffer size is smaller then mtu\n- *  - or reassmeble support is requested\n+ *  - or reassemble support is requested\n  */\n static int\n multi_seg_required(void)\n@@ -2050,7 +2050,7 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,\n \n \tret = rte_hash_add_key_data(map, &key, (void *)i);\n \tif (ret < 0) {\n-\t\tprintf(\"Faled to insert cdev mapping for (lcore %u, \"\n+\t\tprintf(\"Failed to insert cdev mapping for (lcore %u, \"\n \t\t\t\t\"cdev %u, qp %u), errno %d\\n\",\n \t\t\t\tkey.lcore_id, ipsec_ctx->tbl[i].id,\n \t\t\t\tipsec_ctx->tbl[i].qp, ret);\n@@ -2083,7 +2083,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,\n \t\tstr = \"Inbound\";\n \t}\n \n-\t/* Required cryptodevs with operation chainning */\n+\t/* Required cryptodevs with operation chaining */\n \tif (!(dev_info->feature_flags &\n \t\t\t\tRTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))\n \t\treturn ret;\n@@ -2251,7 +2251,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n \t\t\t\"Error during getting device (port %u) info: %s\\n\",\n \t\t\tportid, strerror(-ret));\n \n-\t/* limit allowed HW offloafs, as user requested */\n+\t/* limit allowed HW offloads, as user requested */\n \tdev_info.rx_offload_capa &= dev_rx_offload;\n \tdev_info.tx_offload_capa &= dev_tx_offload;\n \n@@ -2298,7 +2298,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n \t\t\tlocal_port_conf.rxmode.offloads)\n \t\trte_exit(EXIT_FAILURE,\n \t\t\t\"Error: port %u required RX offloads: 0x%\" PRIx64\n-\t\t\t\", avaialbe RX offloads: 0x%\" PRIx64 \"\\n\",\n+\t\t\t\", available RX offloads: 0x%\" PRIx64 \"\\n\",\n \t\t\tportid, local_port_conf.rxmode.offloads,\n \t\t\tdev_info.rx_offload_capa);\n \n@@ -2306,7 +2306,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n \t\t\tlocal_port_conf.txmode.offloads)\n \t\trte_exit(EXIT_FAILURE,\n \t\t\t\"Error: port %u required TX offloads: 0x%\" PRIx64\n-\t\t\t\", avaialbe TX offloads: 0x%\" PRIx64 
\"\\n\",\n+\t\t\t\", available TX offloads: 0x%\" PRIx64 \"\\n\",\n \t\t\tportid, local_port_conf.txmode.offloads,\n \t\t\tdev_info.tx_offload_capa);\n \n@@ -2317,7 +2317,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n \tif (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)\n \t\tlocal_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;\n \n-\tprintf(\"port %u configurng rx_offloads=0x%\" PRIx64\n+\tprintf(\"port %u configuring rx_offloads=0x%\" PRIx64\n \t\t\", tx_offloads=0x%\" PRIx64 \"\\n\",\n \t\tportid, local_port_conf.rxmode.offloads,\n \t\tlocal_port_conf.txmode.offloads);\ndiff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c\nindex 30bc693e..1839ac71 100644\n--- a/examples/ipsec-secgw/sa.c\n+++ b/examples/ipsec-secgw/sa.c\n@@ -897,7 +897,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,\n \t\t\tcontinue;\n \t\t}\n \n-\t\t/* unrecognizeable input */\n+\t\t/* unrecognizable input */\n \t\tAPP_CHECK(0, status, \"unrecognized input \\\"%s\\\"\",\n \t\t\ttokens[ti]);\n \t\treturn;\n@@ -1145,7 +1145,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,\n \tif (rc4 >= 0) {\n \t\tif (rc6 >= 0) {\n \t\t\tRTE_LOG(ERR, IPSEC,\n-\t\t\t\t\"%s: SPI %u used simultaeously by \"\n+\t\t\t\t\"%s: SPI %u used simultaneously by \"\n \t\t\t\t\"IPv4(%d) and IPv6 (%d) SP rules\\n\",\n \t\t\t\t__func__, spi, rc4, rc6);\n \t\t\treturn -EINVAL;\n@@ -1550,7 +1550,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)\n }\n \n /*\n- * Allocate space and init rte_ipsec_sa strcutures,\n+ * Allocate space and init rte_ipsec_sa structures,\n  * one per session.\n  */\n static int\ndiff --git a/examples/ipsec-secgw/sp4.c b/examples/ipsec-secgw/sp4.c\nindex beddd7bc..fc4101a4 100644\n--- a/examples/ipsec-secgw/sp4.c\n+++ b/examples/ipsec-secgw/sp4.c\n@@ -410,7 +410,7 @@ parse_sp4_tokens(char **tokens, uint32_t n_tokens,\n \t\t\tcontinue;\n \t\t}\n \n-\t\t/* unrecognizeable input */\n+\t\t/* unrecognizable input */\n \t\tAPP_CHECK(0, status, \"unrecognized input \\\"%s\\\"\",\n \t\t\ttokens[ti]);\n \t\treturn;\ndiff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c\nindex 328e0852..cce4da78 100644\n--- a/examples/ipsec-secgw/sp6.c\n+++ b/examples/ipsec-secgw/sp6.c\n@@ -515,7 +515,7 @@ parse_sp6_tokens(char **tokens, uint32_t n_tokens,\n \t\t\tcontinue;\n \t\t}\n \n-\t\t/* unrecognizeable input */\n+\t\t/* unrecognizable input */\n \t\tAPP_CHECK(0, status, \"unrecognized input \\\"%s\\\"\",\n \t\t\ttokens[ti]);\n \t\treturn;\ndiff --git a/examples/ipsec-secgw/test/common_defs.sh b/examples/ipsec-secgw/test/common_defs.sh\nindex f22eb3ab..3ef06bc7 100644\n--- a/examples/ipsec-secgw/test/common_defs.sh\n+++ b/examples/ipsec-secgw/test/common_defs.sh\n@@ -20,7 +20,7 @@ REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}`\n st=$?\n REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'`\n if [[ $st -ne 0 || -z \"${REMOTE_MAC}\" ]]; then\n-\techo \"coouldn't retrieve ether addr from ${REMOTE_IFACE}\"\n+\techo \"couldn't retrieve ether addr from ${REMOTE_IFACE}\"\n \texit 127\n fi\n \n@@ -40,7 +40,7 @@ DPDK_VARS=\"\"\n \n # by default ipsec-secgw can't deal with multi-segment packets\n # make sure our local/remote host wouldn't generate fragmented packets\n-# if reassmebly option is not enabled\n+# if reassembly option is not enabled\n DEF_MTU_LEN=1400\n DEF_PING_LEN=1200\n \ndiff --git a/examples/kni/main.c b/examples/kni/main.c\nindex d324ee22..f5b20a7b 
100644\n--- a/examples/kni/main.c\n+++ b/examples/kni/main.c\n@@ -1039,7 +1039,7 @@ main(int argc, char** argv)\n \tpthread_t kni_link_tid;\n \tint pid;\n \n-\t/* Associate signal_hanlder function with USR signals */\n+\t/* Associate signal_handler function with USR signals */\n \tsignal(SIGUSR1, signal_handler);\n \tsignal(SIGUSR2, signal_handler);\n \tsignal(SIGRTMIN, signal_handler);\ndiff --git a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c\nindex d9cf00c9..6e16705e 100644\n--- a/examples/l2fwd-cat/l2fwd-cat.c\n+++ b/examples/l2fwd-cat/l2fwd-cat.c\n@@ -157,7 +157,7 @@ main(int argc, char *argv[])\n \tint ret = rte_eal_init(argc, argv);\n \tif (ret < 0)\n \t\trte_exit(EXIT_FAILURE, \"Error with EAL initialization\\n\");\n-\t/* >8 End of initializion the Environment Abstraction Layer (EAL). */\n+\t/* >8 End of initialization of the Environment Abstraction Layer (EAL). */\n \n \targc -= ret;\n \targv += ret;\ndiff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c\nindex f31569a7..1977e232 100644\n--- a/examples/l2fwd-event/l2fwd_event_generic.c\n+++ b/examples/l2fwd-event/l2fwd_event_generic.c\n@@ -42,7 +42,7 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)\n \t\tethdev_count++;\n \t}\n \n-\t/* Event device configurtion */\n+\t/* Event device configuration */\n \trte_event_dev_info_get(event_d_id, &dev_info);\n \n \t/* Enable implicit release */\ndiff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c\nindex 86d772d8..717a7bce 100644\n--- a/examples/l2fwd-event/l2fwd_event_internal_port.c\n+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c\n@@ -40,7 +40,7 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)\n \t\tethdev_count++;\n \t}\n \n-\t/* Event device configurtion */\n+\t/* Event device configuration */\n \trte_event_dev_info_get(event_d_id, &dev_info);\n \n \t/* Enable implicit release */\ndiff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c\nindex d8eabe4c..9e71ba2d 100644\n--- a/examples/l2fwd-jobstats/main.c\n+++ b/examples/l2fwd-jobstats/main.c\n@@ -468,7 +468,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)\n \t\tqconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;\n \t}\n \n-\t/* Pass target to indicate that this job is happy of time interwal\n+\t/* Pass target to indicate that this job is happy with the time interval\n \t * in which it was called. 
*/\n \trte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);\n }\ndiff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c\nindex 1fb18072..151cb8f6 100644\n--- a/examples/l3fwd-acl/main.c\n+++ b/examples/l3fwd-acl/main.c\n@@ -801,8 +801,8 @@ send_packets(struct rte_mbuf **m, uint32_t *res, int num)\n }\n \n /*\n- * Parses IPV6 address, exepcts the following format:\n- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).\n+ * Parses IPV6 address, expects the following format:\n+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexadecimal digit).\n  */\n static int\n parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],\n@@ -1959,7 +1959,7 @@ check_all_ports_link_status(uint32_t port_mask)\n }\n \n /*\n- * build-up default vaues for dest MACs.\n+ * build-up default values for dest MACs.\n  */\n static void\n set_default_dest_mac(void)\ndiff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c\nindex b8b3be2b..20e5b59a 100644\n--- a/examples/l3fwd-power/main.c\n+++ b/examples/l3fwd-power/main.c\n@@ -433,7 +433,7 @@ signal_exit_now(int sigtype)\n \n }\n \n-/*  Freqency scale down timer callback */\n+/*  Frequency scale down timer callback */\n static void\n power_timer_cb(__rte_unused struct rte_timer *tim,\n \t\t\t  __rte_unused void *arg)\n@@ -2358,7 +2358,7 @@ update_telemetry(__rte_unused struct rte_timer *tim,\n \tret = rte_metrics_update_values(RTE_METRICS_GLOBAL, telstats_index,\n \t\t\t\t\tvalues, RTE_DIM(values));\n \tif (ret < 0)\n-\t\tRTE_LOG(WARNING, POWER, \"failed to update metrcis\\n\");\n+\t\tRTE_LOG(WARNING, POWER, \"failed to update metrics\\n\");\n }\n \n static int\ndiff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h\nindex 7d83ff64..cbaab79f 100644\n--- a/examples/l3fwd/l3fwd_common.h\n+++ b/examples/l3fwd/l3fwd_common.h\n@@ -51,7 +51,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)\n #endif /* DO_RFC_1812_CHECKS */\n \n /*\n- * We group consecutive packets with the same destionation port into one burst.\n+ * We group consecutive packets with the same destination port into one burst.\n  * To avoid extra latency this is done together with some other packet\n  * processing, but after we made a final decision about packet's destination.\n  * To do this we maintain:\n@@ -76,7 +76,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)\n \n static const struct {\n \tuint64_t pnum; /* prebuild 4 values for pnum[]. */\n-\tint32_t  idx;  /* index for new last updated elemnet. */\n+\tint32_t  idx;  /* index for new last updated element. */\n \tuint16_t lpv;  /* add value to the last updated element. */\n } gptbl[GRPSZ] = {\n \t{\ndiff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h\nindex 86ac5971..e3d33a52 100644\n--- a/examples/l3fwd/l3fwd_neon.h\n+++ b/examples/l3fwd/l3fwd_neon.h\n@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])\n \n /*\n  * Group consecutive packets with the same destination port in bursts of 4.\n- * Suppose we have array of destionation ports:\n+ * Suppose we have array of destination ports:\n  * dst_port[] = {a, b, c, d,, e, ... 
}\n  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.\n  * We doing 4 comparisons at once and the result is 4 bit mask.\ndiff --git a/examples/l3fwd/l3fwd_sse.h b/examples/l3fwd/l3fwd_sse.h\nindex bb565ed5..d5a717e1 100644\n--- a/examples/l3fwd/l3fwd_sse.h\n+++ b/examples/l3fwd/l3fwd_sse.h\n@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])\n \n /*\n  * Group consecutive packets with the same destination port in bursts of 4.\n- * Suppose we have array of destionation ports:\n+ * Suppose we have array of destination ports:\n  * dst_port[] = {a, b, c, d,, e, ... }\n  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.\n  * We doing 4 comparisons at once and the result is 4 bit mask.\ndiff --git a/examples/multi_process/hotplug_mp/commands.c b/examples/multi_process/hotplug_mp/commands.c\nindex 48fd3295..41ea265e 100644\n--- a/examples/multi_process/hotplug_mp/commands.c\n+++ b/examples/multi_process/hotplug_mp/commands.c\n@@ -175,7 +175,7 @@ static void cmd_dev_detach_parsed(void *parsed_result,\n \t\tcmdline_printf(cl, \"detached device %s\\n\",\n \t\t\tda.name);\n \telse\n-\t\tcmdline_printf(cl, \"failed to dettach device %s\\n\",\n+\t\tcmdline_printf(cl, \"failed to detach device %s\\n\",\n \t\t\tda.name);\n \trte_devargs_reset(&da);\n }\ndiff --git a/examples/multi_process/simple_mp/main.c b/examples/multi_process/simple_mp/main.c\nindex 5df2a390..9d5f1088 100644\n--- a/examples/multi_process/simple_mp/main.c\n+++ b/examples/multi_process/simple_mp/main.c\n@@ -4,7 +4,7 @@\n \n /*\n  * This sample application is a simple multi-process application which\n- * demostrates sharing of queues and memory pools between processes, and\n+ * demonstrates sharing of queues and memory pools between processes, and\n  * using those queues/pools for communication between the processes.\n  *\n  * Application is designed to run with two processes, a primary and a\ndiff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c\nindex b35886a7..05033776 100644\n--- a/examples/multi_process/symmetric_mp/main.c\n+++ b/examples/multi_process/symmetric_mp/main.c\n@@ -3,7 +3,7 @@\n  */\n \n /*\n- * Sample application demostrating how to do packet I/O in a multi-process\n+ * Sample application demonstrating how to do packet I/O in a multi-process\n  * environment. The same code can be run as a primary process and as a\n  * secondary process, just with a different proc-id parameter in each case\n  * (apart from the EAL flag to indicate a secondary process).\ndiff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c\nindex f110fc12..81964d03 100644\n--- a/examples/ntb/ntb_fwd.c\n+++ b/examples/ntb/ntb_fwd.c\n@@ -696,7 +696,7 @@ assign_stream_to_lcores(void)\n \t\t\tbreak;\n \t}\n \n-\t/* Print packet forwading config. */\n+\t/* Print packet forwarding config. 
*/\n \tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n \t\tconf = &fwd_lcore_conf[lcore_id];\n \ndiff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c\nindex b01ac60f..99e67ef6 100644\n--- a/examples/packet_ordering/main.c\n+++ b/examples/packet_ordering/main.c\n@@ -686,7 +686,7 @@ main(int argc, char **argv)\n \tif (ret < 0)\n \t\trte_exit(EXIT_FAILURE, \"Invalid packet_ordering arguments\\n\");\n \n-\t/* Check if we have enought cores */\n+\t/* Check if we have enough cores */\n \tif (rte_lcore_count() < 3)\n \t\trte_exit(EXIT_FAILURE, \"Error, This application needs at \"\n \t\t\t\t\"least 3 logical cores to run:\\n\"\ndiff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c\nindex 009374a8..b02e0fc1 100644\n--- a/examples/performance-thread/common/lthread.c\n+++ b/examples/performance-thread/common/lthread.c\n@@ -178,7 +178,7 @@ lthread_create(struct lthread **new_lt, int lcore_id,\n \tbzero(lt, sizeof(struct lthread));\n \tlt->root_sched = THIS_SCHED;\n \n-\t/* set the function args and exit handlder */\n+\t/* set the function args and exit handler */\n \t_lthread_init(lt, fun, arg, _lthread_exit_handler);\n \n \t/* put it in the ready queue */\n@@ -384,7 +384,7 @@ void lthread_exit(void *ptr)\n \t}\n \n \n-\t/* wait until the joinging thread has collected the exit value */\n+\t/* wait until the joining thread has collected the exit value */\n \twhile (lt->join != LT_JOIN_EXIT_VAL_READ)\n \t\t_reschedule();\n \n@@ -410,7 +410,7 @@ int lthread_join(struct lthread *lt, void **ptr)\n \t/* invalid to join a detached thread, or a thread that is joined */\n \tif ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))\n \t\treturn POSIX_ERRNO(EINVAL);\n-\t/* pointer to the joining thread and a poingter to return a value */\n+\t/* pointer to the joining thread and a pointer to return a value */\n \tlt->lt_join = current;\n \tcurrent->lt_exit_ptr = ptr;\n \t/* There is a race between lthread_join() and lthread_exit()\ndiff --git a/examples/performance-thread/common/lthread_diag.c b/examples/performance-thread/common/lthread_diag.c\nindex 57760a1e..b1bdf7a3 100644\n--- a/examples/performance-thread/common/lthread_diag.c\n+++ b/examples/performance-thread/common/lthread_diag.c\n@@ -232,7 +232,7 @@ lthread_sched_stats_display(void)\n }\n \n /*\n- * Defafult diagnostic callback\n+ * Default diagnostic callback\n  */\n static uint64_t\n _lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,\ndiff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h\nindex d010126f..ec018e34 100644\n--- a/examples/performance-thread/common/lthread_int.h\n+++ b/examples/performance-thread/common/lthread_int.h\n@@ -107,7 +107,7 @@ enum join_st {\n \tLT_JOIN_EXIT_VAL_READ,\t/* joining thread has collected ret val */\n };\n \n-/* defnition of an lthread stack object */\n+/* definition of an lthread stack object */\n struct lthread_stack {\n \tuint8_t stack[LTHREAD_MAX_STACK_SIZE];\n \tsize_t stack_size;\ndiff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c\nindex 4ab2e355..bae45f2a 100644\n--- a/examples/performance-thread/common/lthread_tls.c\n+++ b/examples/performance-thread/common/lthread_tls.c\n@@ -215,7 +215,7 @@ void _lthread_tls_alloc(struct lthread *lt)\n \ttls->root_sched = (THIS_SCHED);\n \tlt->tls = tls;\n \n-\t/* allocate data for TLS varaiables using RTE_PER_LTHREAD macros */\n+\t/* allocate data 
for TLS variables using RTE_PER_LTHREAD macros */\n \tif (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {\n \t\tlt->per_lthread_data =\n \t\t    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);\ndiff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c\nindex 8a350405..eda5e701 100644\n--- a/examples/performance-thread/l3fwd-thread/main.c\n+++ b/examples/performance-thread/l3fwd-thread/main.c\n@@ -125,7 +125,7 @@ cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n }\n \n /*\n- *  When set to zero, simple forwaring path is eanbled.\n+ *  When set to zero, simple forwarding path is enabled.\n  *  When set to one, optimized forwarding path is enabled.\n  *  Note that LPM optimisation path uses SSE4.1 instructions.\n  */\n@@ -1529,7 +1529,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])\n }\n \n /*\n- * We group consecutive packets with the same destionation port into one burst.\n+ * We group consecutive packets with the same destination port into one burst.\n  * To avoid extra latency this is done together with some other packet\n  * processing, but after we made a final decision about packet's destination.\n  * To do this we maintain:\n@@ -1554,7 +1554,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])\n \n /*\n  * Group consecutive packets with the same destination port in bursts of 4.\n- * Suppose we have array of destionation ports:\n+ * Suppose we have array of destination ports:\n  * dst_port[] = {a, b, c, d,, e, ... }\n  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.\n  * We doing 4 comparisons at once and the result is 4 bit mask.\n@@ -1565,7 +1565,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)\n {\n \tstatic const struct {\n \t\tuint64_t pnum; /* prebuild 4 values for pnum[]. */\n-\t\tint32_t  idx;  /* index for new last updated elemnet. */\n+\t\tint32_t  idx;  /* index for new last updated element. */\n \t\tuint16_t lpv;  /* add value to the last updated element. */\n \t} gptbl[GRPSZ] = {\n \t{\n@@ -1834,7 +1834,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,\n \n \t/*\n \t * Send packets out, through destination port.\n-\t * Consecuteve pacekts with the same destination port\n+\t * Consecutive packets with the same destination port\n \t * are already grouped together.\n \t * If destination port for the packet equals BAD_PORT,\n \t * then free the packet without sending it out.\n@@ -3514,7 +3514,7 @@ main(int argc, char **argv)\n \n \tret = rte_timer_subsystem_init();\n \tif (ret < 0)\n-\t\trte_exit(EXIT_FAILURE, \"Failed to initialize timer subystem\\n\");\n+\t\trte_exit(EXIT_FAILURE, \"Failed to initialize timer subsystem\\n\");\n \n \t/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */\n \tfor (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {\ndiff --git a/examples/performance-thread/pthread_shim/pthread_shim.h b/examples/performance-thread/pthread_shim/pthread_shim.h\nindex e90fb15f..ce51627a 100644\n--- a/examples/performance-thread/pthread_shim/pthread_shim.h\n+++ b/examples/performance-thread/pthread_shim/pthread_shim.h\n@@ -41,7 +41,7 @@\n  *\n  * The decision whether to invoke the real library function or the lthread\n  * function is controlled by a per pthread flag that can be switched\n- * on of off by the pthread_override_set() API described below. Typcially\n+ * on or off by the pthread_override_set() API described below. 
Typically\n  * this should be done as the first action of the initial lthread.\n  *\n  * N.B In general it would be poor practice to revert to invoke a real\ndiff --git a/examples/pipeline/examples/registers.spec b/examples/pipeline/examples/registers.spec\nindex 74a014ad..59998fef 100644\n--- a/examples/pipeline/examples/registers.spec\n+++ b/examples/pipeline/examples/registers.spec\n@@ -4,7 +4,7 @@\n ; This program is setting up two register arrays called \"pkt_counters\" and \"byte_counters\".\n ; On every input packet (Ethernet/IPv4), the \"pkt_counters\" register at location indexed by\n ; the IPv4 header \"Source Address\" field is incremented, while the same location in the\n-; \"byte_counters\" array accummulates the value of the IPv4 header \"Total Length\" field.\n+; \"byte_counters\" array accumulates the value of the IPv4 header \"Total Length\" field.\n ;\n ; The \"regrd\" and \"regwr\" CLI commands can be used to read and write the current value of\n ; any register array location.\ndiff --git a/examples/qos_sched/cmdline.c b/examples/qos_sched/cmdline.c\nindex 257b87a7..6691b02d 100644\n--- a/examples/qos_sched/cmdline.c\n+++ b/examples/qos_sched/cmdline.c\n@@ -41,7 +41,7 @@ static void cmd_help_parsed(__rte_unused void *parsed_result,\n \t\t\"    qavg port X subport Y pipe Z              : Show average queue size per pipe.\\n\"\n \t\t\"    qavg port X subport Y pipe Z tc A         : Show average queue size per pipe and TC.\\n\"\n \t\t\"    qavg port X subport Y pipe Z tc A q B     : Show average queue size of a specific queue.\\n\"\n-\t\t\"    qavg [n|period] X                     : Set number of times and peiod (us).\\n\\n\"\n+\t\t\"    qavg [n|period] X                     : Set number of times and period (us).\\n\\n\"\n \t);\n \n }\ndiff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c\nindex ba1c7e51..fc2aa5ff 100644\n--- a/examples/server_node_efd/node/node.c\n+++ b/examples/server_node_efd/node/node.c\n@@ -296,7 +296,7 @@ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)\n \t\t}\n \t}\n }\n-/* >8 End of packets dequeueing. */\n+/* >8 End of packets dequeuing. */\n \n /*\n  * Application main function - loops through\ndiff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c\nindex 16435ee3..518cd721 100644\n--- a/examples/skeleton/basicfwd.c\n+++ b/examples/skeleton/basicfwd.c\n@@ -179,7 +179,7 @@ main(int argc, char *argv[])\n \tint ret = rte_eal_init(argc, argv);\n \tif (ret < 0)\n \t\trte_exit(EXIT_FAILURE, \"Error with EAL initialization\\n\");\n-\t/* >8 End of initializion the Environment Abstraction Layer (EAL). */\n+\t/* >8 End of initialization the Environment Abstraction Layer (EAL). */\n \n \targc -= ret;\n \targv += ret;\ndiff --git a/examples/vhost/main.c b/examples/vhost/main.c\nindex 33d023aa..b65e80b7 100644\n--- a/examples/vhost/main.c\n+++ b/examples/vhost/main.c\n@@ -107,7 +107,7 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;\n static char *socket_files;\n static int nb_sockets;\n \n-/* empty vmdq configuration structure. Filled in programatically */\n+/* empty vmdq configuration structure. 
Filled in programmatically */\n static struct rte_eth_conf vmdq_conf_default = {\n \t.rxmode = {\n \t\t.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,\n@@ -115,7 +115,7 @@ static struct rte_eth_conf vmdq_conf_default = {\n \t\t/*\n \t\t * VLAN strip is necessary for 1G NIC such as I350,\n \t\t * this fixes bug of ipv4 forwarding in guest can't\n-\t\t * forward pakets from one virtio dev to another virtio dev.\n+\t\t * forward packets from one virtio dev to another virtio dev.\n \t\t */\n \t\t.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,\n \t},\n@@ -463,7 +463,7 @@ us_vhost_usage(const char *prgname)\n \t\"\t\t--nb-devices ND\\n\"\n \t\"\t\t-p PORTMASK: Set mask for ports to be used by application\\n\"\n \t\"\t\t--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\\n\"\n-\t\"\t\t--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\\n\"\n+\t\"\t\t--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\\n\"\n \t\"\t\t--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\\n\"\n \t\"\t\t--rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\\n\"\n \t\"\t\t--mergeable [0|1]: disable(default)/enable RX mergeable buffers\\n\"\n@@ -1289,7 +1289,7 @@ switch_worker(void *arg __rte_unused)\n \tstruct vhost_dev *vdev;\n \tstruct mbuf_table *tx_q;\n \n-\tRTE_LOG(INFO, VHOST_DATA, \"Procesing on Core %u started\\n\", lcore_id);\n+\tRTE_LOG(INFO, VHOST_DATA, \"Processing on Core %u started\\n\", lcore_id);\n \n \ttx_q = &lcore_tx_queue[lcore_id];\n \tfor (i = 0; i < rte_lcore_count(); i++) {\n@@ -1333,7 +1333,7 @@ switch_worker(void *arg __rte_unused)\n \n /*\n  * Remove a device from the specific data core linked list and from the\n- * main linked list. Synchonization  occurs through the use of the\n+ * main linked list. Synchronization  occurs through the use of the\n  * lcore dev_removal_flag. 
Device is made volatile here to avoid re-ordering\n  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.\n  */\ndiff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c\nindex 9064fc3a..1b646059 100644\n--- a/examples/vhost/virtio_net.c\n+++ b/examples/vhost/virtio_net.c\n@@ -62,7 +62,7 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t    struct rte_mbuf *m, uint16_t desc_idx)\n {\n \tuint32_t desc_avail, desc_offset;\n-\tuint64_t desc_chunck_len;\n+\tuint64_t desc_chunk_len;\n \tuint32_t mbuf_avail, mbuf_offset;\n \tuint32_t cpy_len;\n \tstruct vring_desc *desc;\n@@ -72,10 +72,10 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \tuint16_t nr_desc = 1;\n \n \tdesc = &vr->desc[desc_idx];\n-\tdesc_chunck_len = desc->len;\n+\tdesc_chunk_len = desc->len;\n \tdesc_gaddr = desc->addr;\n \tdesc_addr = rte_vhost_va_from_guest_pa(\n-\t\t\tdev->mem, desc_gaddr, &desc_chunck_len);\n+\t\t\tdev->mem, desc_gaddr, &desc_chunk_len);\n \t/*\n \t * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid\n \t * performance issue with some versions of gcc (4.8.4 and 5.3.0) which\n@@ -87,7 +87,7 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \trte_prefetch0((void *)(uintptr_t)desc_addr);\n \n \t/* write virtio-net header */\n-\tif (likely(desc_chunck_len >= dev->hdr_len)) {\n+\tif (likely(desc_chunk_len >= dev->hdr_len)) {\n \t\t*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;\n \t\tdesc_offset = dev->hdr_len;\n \t} else {\n@@ -112,11 +112,11 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\t\tsrc += len;\n \t\t}\n \n-\t\tdesc_chunck_len = desc->len - dev->hdr_len;\n+\t\tdesc_chunk_len = desc->len - dev->hdr_len;\n \t\tdesc_gaddr += dev->hdr_len;\n \t\tdesc_addr = rte_vhost_va_from_guest_pa(\n \t\t\t\tdev->mem, desc_gaddr,\n-\t\t\t\t&desc_chunck_len);\n+\t\t\t\t&desc_chunk_len);\n \t\tif (unlikely(!desc_addr))\n \t\t\treturn -1;\n \n@@ -147,28 +147,28 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\t\t\treturn -1;\n \n \t\t\tdesc = &vr->desc[desc->next];\n-\t\t\tdesc_chunck_len = desc->len;\n+\t\t\tdesc_chunk_len = desc->len;\n \t\t\tdesc_gaddr = desc->addr;\n \t\t\tdesc_addr = rte_vhost_va_from_guest_pa(\n-\t\t\t\t\tdev->mem, desc_gaddr, &desc_chunck_len);\n+\t\t\t\t\tdev->mem, desc_gaddr, &desc_chunk_len);\n \t\t\tif (unlikely(!desc_addr))\n \t\t\t\treturn -1;\n \n \t\t\tdesc_offset = 0;\n \t\t\tdesc_avail  = desc->len;\n-\t\t} else if (unlikely(desc_chunck_len == 0)) {\n-\t\t\tdesc_chunck_len = desc_avail;\n+\t\t} else if (unlikely(desc_chunk_len == 0)) {\n+\t\t\tdesc_chunk_len = desc_avail;\n \t\t\tdesc_gaddr += desc_offset;\n \t\t\tdesc_addr = rte_vhost_va_from_guest_pa(dev->mem,\n \t\t\t\t\tdesc_gaddr,\n-\t\t\t\t\t&desc_chunck_len);\n+\t\t\t\t\t&desc_chunk_len);\n \t\t\tif (unlikely(!desc_addr))\n \t\t\t\treturn -1;\n \n \t\t\tdesc_offset = 0;\n \t\t}\n \n-\t\tcpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);\n+\t\tcpy_len = RTE_MIN(desc_chunk_len, mbuf_avail);\n \t\trte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),\n \t\t\trte_pktmbuf_mtod_offset(m, void *, mbuf_offset),\n \t\t\tcpy_len);\n@@ -177,7 +177,7 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\tmbuf_offset += cpy_len;\n \t\tdesc_avail  -= cpy_len;\n \t\tdesc_offset += cpy_len;\n-\t\tdesc_chunck_len -= cpy_len;\n+\t\tdesc_chunk_len -= cpy_len;\n \t}\n \n \treturn 0;\n@@ -246,7 +246,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n 
\tstruct vring_desc *desc;\n \tuint64_t desc_addr, desc_gaddr;\n \tuint32_t desc_avail, desc_offset;\n-\tuint64_t desc_chunck_len;\n+\tuint64_t desc_chunk_len;\n \tuint32_t mbuf_avail, mbuf_offset;\n \tuint32_t cpy_len;\n \tstruct rte_mbuf *cur = m, *prev = m;\n@@ -258,10 +258,10 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\t\t(desc->flags & VRING_DESC_F_INDIRECT))\n \t\treturn -1;\n \n-\tdesc_chunck_len = desc->len;\n+\tdesc_chunk_len = desc->len;\n \tdesc_gaddr = desc->addr;\n \tdesc_addr = rte_vhost_va_from_guest_pa(\n-\t\t\tdev->mem, desc_gaddr, &desc_chunck_len);\n+\t\t\tdev->mem, desc_gaddr, &desc_chunk_len);\n \tif (unlikely(!desc_addr))\n \t\treturn -1;\n \n@@ -275,10 +275,10 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t * header.\n \t */\n \tdesc = &vr->desc[desc->next];\n-\tdesc_chunck_len = desc->len;\n+\tdesc_chunk_len = desc->len;\n \tdesc_gaddr = desc->addr;\n \tdesc_addr = rte_vhost_va_from_guest_pa(\n-\t\t\tdev->mem, desc_gaddr, &desc_chunck_len);\n+\t\t\tdev->mem, desc_gaddr, &desc_chunk_len);\n \tif (unlikely(!desc_addr))\n \t\treturn -1;\n \trte_prefetch0((void *)(uintptr_t)desc_addr);\n@@ -290,7 +290,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \tmbuf_offset = 0;\n \tmbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;\n \twhile (1) {\n-\t\tcpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);\n+\t\tcpy_len = RTE_MIN(desc_chunk_len, mbuf_avail);\n \t\trte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,\n \t\t\t\t\t\t   mbuf_offset),\n \t\t\t(void *)((uintptr_t)(desc_addr + desc_offset)),\n@@ -300,7 +300,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\tmbuf_offset += cpy_len;\n \t\tdesc_avail  -= cpy_len;\n \t\tdesc_offset += cpy_len;\n-\t\tdesc_chunck_len -= cpy_len;\n+\t\tdesc_chunk_len -= cpy_len;\n \n \t\t/* This desc reaches to its end, get the next one */\n \t\tif (desc_avail == 0) {\n@@ -312,22 +312,22 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,\n \t\t\t\treturn -1;\n \t\t\tdesc = &vr->desc[desc->next];\n \n-\t\t\tdesc_chunck_len = desc->len;\n+\t\t\tdesc_chunk_len = desc->len;\n \t\t\tdesc_gaddr = desc->addr;\n \t\t\tdesc_addr = rte_vhost_va_from_guest_pa(\n-\t\t\t\t\tdev->mem, desc_gaddr, &desc_chunck_len);\n+\t\t\t\t\tdev->mem, desc_gaddr, &desc_chunk_len);\n \t\t\tif (unlikely(!desc_addr))\n \t\t\t\treturn -1;\n \t\t\trte_prefetch0((void *)(uintptr_t)desc_addr);\n \n \t\t\tdesc_offset = 0;\n \t\t\tdesc_avail  = desc->len;\n-\t\t} else if (unlikely(desc_chunck_len == 0)) {\n-\t\t\tdesc_chunck_len = desc_avail;\n+\t\t} else if (unlikely(desc_chunk_len == 0)) {\n+\t\t\tdesc_chunk_len = desc_avail;\n \t\t\tdesc_gaddr += desc_offset;\n \t\t\tdesc_addr = rte_vhost_va_from_guest_pa(dev->mem,\n \t\t\t\t\tdesc_gaddr,\n-\t\t\t\t\t&desc_chunck_len);\n+\t\t\t\t\t&desc_chunk_len);\n \t\t\tif (unlikely(!desc_addr))\n \t\t\t\treturn -1;\n \ndiff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c\nindex d767423a..97b8def7 100644\n--- a/examples/vm_power_manager/channel_monitor.c\n+++ b/examples/vm_power_manager/channel_monitor.c\n@@ -404,7 +404,7 @@ get_pcpu_to_control(struct policy *pol)\n \n \t/*\n \t * So now that we're handling virtual and physical cores, we need to\n-\t * differenciate between them when adding them to the branch monitor.\n+\t * differentiate between them when adding them to the branch monitor.\n \t * Virtual cores need to be converted to physical cores.\n \t */\n \tif (pol->pkt.core_type == 
RTE_POWER_CORE_TYPE_VIRTUAL) {\ndiff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h\nindex d35f8cbe..d51039e2 100644\n--- a/examples/vm_power_manager/power_manager.h\n+++ b/examples/vm_power_manager/power_manager.h\n@@ -224,7 +224,7 @@ int power_manager_enable_turbo_core(unsigned int core_num);\n int power_manager_disable_turbo_core(unsigned int core_num);\n \n /**\n- * Get the current freuency of the core specified by core_num\n+ * Get the current frequency of the core specified by core_num\n  *\n  * @param core_num\n  *  The core number to get the current frequency\ndiff --git a/examples/vmdq/main.c b/examples/vmdq/main.c\nindex 2c00a942..2a294635 100644\n--- a/examples/vmdq/main.c\n+++ b/examples/vmdq/main.c\n@@ -62,7 +62,7 @@ static uint8_t rss_enable;\n \n /* Default structure for VMDq. 8< */\n \n-/* empty vmdq configuration structure. Filled in programatically */\n+/* empty vmdq configuration structure. Filled in programmatically */\n static const struct rte_eth_conf vmdq_conf_default = {\n \t.rxmode = {\n \t\t.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,\ndiff --git a/kernel/linux/kni/kni_fifo.h b/kernel/linux/kni/kni_fifo.h\nindex 5c91b553..791552af 100644\n--- a/kernel/linux/kni/kni_fifo.h\n+++ b/kernel/linux/kni/kni_fifo.h\n@@ -41,7 +41,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)\n }\n \n /**\n- * Get up to num elements from the fifo. Return the number actully read\n+ * Get up to num elements from the fifo. Return the number actually read\n  */\n static inline uint32_t\n kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)\ndiff --git a/lib/acl/acl_bld.c b/lib/acl/acl_bld.c\nindex f316d3e8..7ea30f41 100644\n--- a/lib/acl/acl_bld.c\n+++ b/lib/acl/acl_bld.c\n@@ -885,7 +885,7 @@ acl_gen_range_trie(struct acl_build_context *context,\n \t\treturn root;\n \t}\n \n-\t/* gather information about divirgent paths */\n+\t/* gather information about divergent paths */\n \tlo_00 = 0;\n \thi_ff = UINT8_MAX;\n \tfor (k = n - 1; k >= 0; k--) {\ndiff --git a/lib/acl/acl_run_altivec.h b/lib/acl/acl_run_altivec.h\nindex 2de6f27b..24a41eec 100644\n--- a/lib/acl/acl_run_altivec.h\n+++ b/lib/acl/acl_run_altivec.h\n@@ -146,7 +146,7 @@ transition4(xmm_t next_input, const uint64_t *trans,\n \n \tdfa_ofs = vec_sub(t, r);\n \n-\t/* QUAD/SINGLE caluclations. */\n+\t/* QUAD/SINGLE calculations. 
*/\n \tt = (xmm_t)vec_cmpgt((vector signed char)in, (vector signed char)tr_hi);\n \tt = (xmm_t)vec_sel(\n \t\tvec_sel(\ndiff --git a/lib/acl/acl_run_avx512.c b/lib/acl/acl_run_avx512.c\nindex 78fbe34f..3b879556 100644\n--- a/lib/acl/acl_run_avx512.c\n+++ b/lib/acl/acl_run_avx512.c\n@@ -64,7 +64,7 @@ update_flow_mask(const struct acl_flow_avx512 *flow, uint32_t *fmsk,\n }\n \n /*\n- * Resolve matches for multiple categories (LE 8, use 128b instuctions/regs)\n+ * Resolve matches for multiple categories (LE 8, use 128b instructions/regs)\n  */\n static inline void\n resolve_mcle8_avx512x1(uint32_t result[],\ndiff --git a/lib/acl/acl_run_avx512x16.h b/lib/acl/acl_run_avx512x16.h\nindex 48bb6fed..c8e6a124 100644\n--- a/lib/acl/acl_run_avx512x16.h\n+++ b/lib/acl/acl_run_avx512x16.h\n@@ -10,7 +10,7 @@\n  */\n \n /*\n- * This implementation uses 512-bit registers(zmm) and instrincts.\n+ * This implementation uses 512-bit registers(zmm) and instincts.\n  * So our main SIMD type is 512-bit width and each such variable can\n  * process sizeof(__m512i) / sizeof(uint32_t) == 16 entries in parallel.\n  */\n@@ -25,20 +25,20 @@\n #define _F_(x)\t\tx##_avx512x16\n \n /*\n- * Same instrincts have different syntaxis (depending on the bit-width),\n+ * Same instincts have different syntaxis (depending on the bit-width),\n  * so to overcome that few macros need to be defined.\n  */\n \n-/* Naming convention for generic epi(packed integers) type instrincts. */\n+/* Naming convention for generic epi(packed integers) type instincts. */\n #define _M_I_(x)\t_mm512_##x\n \n-/* Naming convention for si(whole simd integer) type instrincts. */\n+/* Naming convention for si(whole simd integer) type instincts. */\n #define _M_SI_(x)\t_mm512_##x##_si512\n \n-/* Naming convention for masked gather type instrincts. */\n+/* Naming convention for masked gather type instincts. */\n #define _M_MGI_(x)\t_mm512_##x\n \n-/* Naming convention for gather type instrincts. */\n+/* Naming convention for gather type instincts. */\n #define _M_GI_(name, idx, base, scale)\t_mm512_##name(idx, base, scale)\n \n /* num/mask of transitions per SIMD regs */\n@@ -239,7 +239,7 @@ _F_(gather_bytes)(__m512i zero, const __m512i p[2], const uint32_t m[2],\n }\n \n /*\n- * Resolve matches for multiple categories (GT 8, use 512b instuctions/regs)\n+ * Resolve matches for multiple categories (GT 8, use 512b instructions/regs)\n  */\n static inline void\n resolve_mcgt8_avx512x1(uint32_t result[],\ndiff --git a/lib/acl/acl_run_avx512x8.h b/lib/acl/acl_run_avx512x8.h\nindex 61ac9d1b..edd5c554 100644\n--- a/lib/acl/acl_run_avx512x8.h\n+++ b/lib/acl/acl_run_avx512x8.h\n@@ -10,7 +10,7 @@\n  */\n \n /*\n- * This implementation uses 256-bit registers(ymm) and instrincts.\n+ * This implementation uses 256-bit registers(ymm) and instincts.\n  * So our main SIMD type is 256-bit width and each such variable can\n  * process sizeof(__m256i) / sizeof(uint32_t) == 8 entries in parallel.\n  */\n@@ -25,20 +25,20 @@\n #define _F_(x)\t\tx##_avx512x8\n \n /*\n- * Same instrincts have different syntaxis (depending on the bit-width),\n+ * Same instincts have different syntaxis (depending on the bit-width),\n  * so to overcome that few macros need to be defined.\n  */\n \n-/* Naming convention for generic epi(packed integers) type instrincts. */\n+/* Naming convention for generic epi(packed integers) type instincts. */\n #define _M_I_(x)\t_mm256_##x\n \n-/* Naming convention for si(whole simd integer) type instrincts. 
*/\n+/* Naming convention for si(whole simd integer) type intrinsics. */\n #define _M_SI_(x)\t_mm256_##x##_si256\n \n-/* Naming convention for masked gather type instrincts. */\n+/* Naming convention for masked gather type intrinsics. */\n #define _M_MGI_(x)\t_mm256_m##x\n \n-/* Naming convention for gather type instrincts. */\n+/* Naming convention for gather type intrinsics. */\n #define _M_GI_(name, idx, base, scale)\t_mm256_##name(base, idx, scale)\n \n /* num/mask of transitions per SIMD regs */\ndiff --git a/lib/bpf/bpf_convert.c b/lib/bpf/bpf_convert.c\nindex db84add7..9563274c 100644\n--- a/lib/bpf/bpf_convert.c\n+++ b/lib/bpf/bpf_convert.c\n@@ -412,7 +412,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,\n \t\t\tBPF_EMIT_JMP;\n \t\t\tbreak;\n \n-\t\t\t/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */\n+\t\t\t/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */\n \t\tcase BPF_LDX | BPF_MSH | BPF_B:\n \t\t\t/* tmp = A */\n \t\t\t*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);\n@@ -428,7 +428,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,\n \t\t\t*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);\n \t\t\tbreak;\n \n-\t\t\t/* RET_K is remaped into 2 insns. RET_A case doesn't need an\n+\t\t\t/* RET_K is remapped into 2 insns. RET_A case doesn't need an\n \t\t\t * extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.\n \t\t\t */\n \t\tcase BPF_RET | BPF_A:\ndiff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c\nindex 09331258..2426d57a 100644\n--- a/lib/bpf/bpf_validate.c\n+++ b/lib/bpf/bpf_validate.c\n@@ -856,7 +856,7 @@ eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)\n \tstatic const struct {\n \t\tsize_t off;\n \t\tsize_t sz;\n-\t} mbuf_ro_fileds[] = {\n+\t} mbuf_ro_fields[] = {\n \t\t{ .off = offsetof(struct rte_mbuf, buf_addr), },\n \t\t{ .off = offsetof(struct rte_mbuf, refcnt), },\n \t\t{ .off = offsetof(struct rte_mbuf, nb_segs), },\n@@ -866,13 +866,13 @@ eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)\n \t\t{ .off = offsetof(struct rte_mbuf, priv_size), },\n \t};\n \n-\tfor (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&\n-\t\t\t(mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=\n-\t\t\trv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);\n+\tfor (i = 0; i != RTE_DIM(mbuf_ro_fields) &&\n+\t\t\t(mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=\n+\t\t\trv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);\n \t\t\ti++)\n \t\t;\n \n-\tif (i != RTE_DIM(mbuf_ro_fileds))\n+\tif (i != RTE_DIM(mbuf_ro_fields))\n \t\treturn \"store to the read-only mbuf field\";\n \n \treturn NULL;\ndiff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h\nindex 59ea5a54..5f5cd029 100644\n--- a/lib/cryptodev/rte_cryptodev.h\n+++ b/lib/cryptodev/rte_cryptodev.h\n@@ -27,7 +27,7 @@ extern \"C\" {\n \n #include \"rte_cryptodev_trace_fp.h\"\n \n-extern const char **rte_cyptodev_names;\n+extern const char **rte_cryptodev_names;\n \n /* Logging Macros */\n \ndiff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h\nindex 9942c6ec..4abe79c5 100644\n--- a/lib/dmadev/rte_dmadev.h\n+++ b/lib/dmadev/rte_dmadev.h\n@@ -533,7 +533,7 @@ struct rte_dma_port_param {\n \t\t * @note If some fields can not be supported by the\n \t\t * hardware/driver, then the driver ignores those fields.\n \t\t * Please check driver-specific documentation for limitations\n-\t\t * and capablites.\n+\t\t * and capabilities.\n \t\t */\n \t\t__extension__\n \t\tstruct {\n@@ -731,7 +731,7 @@ enum rte_dma_status_code {\n \t/** The 
operation completed successfully. */\n \tRTE_DMA_STATUS_SUCCESSFUL,\n \t/** The operation failed to complete due abort by user.\n-\t * This is mainly used when processing dev_stop, user could modidy the\n+\t * This is mainly used when processing dev_stop, user could modify the\n \t * descriptors (e.g. change one bit to tell hardware abort this job),\n \t * it allows outstanding requests to be complete as much as possible,\n \t * so reduce the time to stop the device.\ndiff --git a/lib/eal/arm/include/rte_cycles_32.h b/lib/eal/arm/include/rte_cycles_32.h\nindex f79718ce..cec4d69e 100644\n--- a/lib/eal/arm/include/rte_cycles_32.h\n+++ b/lib/eal/arm/include/rte_cycles_32.h\n@@ -30,7 +30,7 @@ extern \"C\" {\n \n /**\n  * This call is easily portable to any architecture, however,\n- * it may require a system call and inprecise for some tasks.\n+ * it may require a system call and imprecise for some tasks.\n  */\n static inline uint64_t\n __rte_rdtsc_syscall(void)\ndiff --git a/lib/eal/common/eal_common_trace_ctf.c b/lib/eal/common/eal_common_trace_ctf.c\nindex 33e419aa..8f245941 100644\n--- a/lib/eal/common/eal_common_trace_ctf.c\n+++ b/lib/eal/common/eal_common_trace_ctf.c\n@@ -321,7 +321,7 @@ meta_fix_freq(struct trace *trace, char *meta)\n static void\n meta_fix_freq_offset(struct trace *trace, char *meta)\n {\n-\tuint64_t uptime_tickes_floor, uptime_ticks, freq, uptime_sec;\n+\tuint64_t uptime_ticks_floor, uptime_ticks, freq, uptime_sec;\n \tuint64_t offset, offset_s;\n \tchar *str;\n \tint rc;\n@@ -329,12 +329,12 @@ meta_fix_freq_offset(struct trace *trace, char *meta)\n \tuptime_ticks = trace->uptime_ticks &\n \t\t\t((1ULL << __RTE_TRACE_EVENT_HEADER_ID_SHIFT) - 1);\n \tfreq = rte_get_tsc_hz();\n-\tuptime_tickes_floor = RTE_ALIGN_MUL_FLOOR(uptime_ticks, freq);\n+\tuptime_ticks_floor = RTE_ALIGN_MUL_FLOOR(uptime_ticks, freq);\n \n-\tuptime_sec = uptime_tickes_floor / freq;\n+\tuptime_sec = uptime_ticks_floor / freq;\n \toffset_s = trace->epoch_sec - uptime_sec;\n \n-\toffset = uptime_ticks - uptime_tickes_floor;\n+\toffset = uptime_ticks - uptime_ticks_floor;\n \toffset += trace->epoch_nsec * (freq / NSEC_PER_SEC);\n \n \tstr = RTE_PTR_ADD(meta, trace->ctf_meta_offset_freq_off_s);\ndiff --git a/lib/eal/freebsd/eal_interrupts.c b/lib/eal/freebsd/eal_interrupts.c\nindex 10aa91cc..9f720bdc 100644\n--- a/lib/eal/freebsd/eal_interrupts.c\n+++ b/lib/eal/freebsd/eal_interrupts.c\n@@ -234,7 +234,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,\n \n \trte_spinlock_lock(&intr_lock);\n \n-\t/* check if the insterrupt source for the fd is existent */\n+\t/* check if the interrupt source for the fd is existent */\n \tTAILQ_FOREACH(src, &intr_sources, next)\n \t\tif (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))\n \t\t\tbreak;\n@@ -288,7 +288,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,\n \n \trte_spinlock_lock(&intr_lock);\n \n-\t/* check if the insterrupt source for the fd is existent */\n+\t/* check if the interrupt source for the fd is existent */\n \tTAILQ_FOREACH(src, &intr_sources, next)\n \t\tif (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))\n \t\t\tbreak;\ndiff --git a/lib/eal/include/generic/rte_pflock.h b/lib/eal/include/generic/rte_pflock.h\nindex b9de063c..e7bb29b3 100644\n--- a/lib/eal/include/generic/rte_pflock.h\n+++ b/lib/eal/include/generic/rte_pflock.h\n@@ -157,7 +157,7 @@ rte_pflock_write_lock(rte_pflock_t *pf)\n \tuint16_t ticket, w;\n \n \t/* Acquire ownership of 
write-phase.\n-\t * This is same as rte_tickelock_lock().\n+\t * This is same as rte_ticketlock_lock().\n \t */\n \tticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);\n \trte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);\ndiff --git a/lib/eal/include/rte_malloc.h b/lib/eal/include/rte_malloc.h\nindex ed02e151..3892519f 100644\n--- a/lib/eal/include/rte_malloc.h\n+++ b/lib/eal/include/rte_malloc.h\n@@ -58,7 +58,7 @@ rte_malloc(const char *type, size_t size, unsigned align)\n \t__rte_alloc_size(2);\n \n /**\n- * Allocate zero'ed memory from the heap.\n+ * Allocate zeroed memory from the heap.\n  *\n  * Equivalent to rte_malloc() except that the memory zone is\n  * initialised with zeros. In NUMA systems, the memory allocated resides on the\n@@ -189,7 +189,7 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket)\n \t__rte_alloc_size(2);\n \n /**\n- * Allocate zero'ed memory from the heap.\n+ * Allocate zeroed memory from the heap.\n  *\n  * Equivalent to rte_malloc() except that the memory zone is\n  * initialised with zeros.\ndiff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c\nindex 6e3925ef..70060bf3 100644\n--- a/lib/eal/linux/eal_interrupts.c\n+++ b/lib/eal/linux/eal_interrupts.c\n@@ -589,7 +589,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,\n \n \trte_spinlock_lock(&intr_lock);\n \n-\t/* check if the insterrupt source for the fd is existent */\n+\t/* check if the interrupt source for the fd is existent */\n \tTAILQ_FOREACH(src, &intr_sources, next) {\n \t\tif (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))\n \t\t\tbreak;\n@@ -639,7 +639,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,\n \n \trte_spinlock_lock(&intr_lock);\n \n-\t/* check if the insterrupt source for the fd is existent */\n+\t/* check if the interrupt source for the fd is existent */\n \tTAILQ_FOREACH(src, &intr_sources, next)\n \t\tif (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))\n \t\t\tbreak;\ndiff --git a/lib/eal/linux/eal_vfio.h b/lib/eal/linux/eal_vfio.h\nindex 6ebaca6a..c5d5f705 100644\n--- a/lib/eal/linux/eal_vfio.h\n+++ b/lib/eal/linux/eal_vfio.h\n@@ -103,7 +103,7 @@ struct vfio_group {\n typedef int (*vfio_dma_func_t)(int);\n \n /* Custom memory region DMA mapping function prototype.\n- * Takes VFIO container fd, virtual address, phisical address, length and\n+ * Takes VFIO container fd, virtual address, physical address, length and\n  * operation type (0 to unmap 1 for map) as a parameters.\n  * Returns 0 on success, -1 on error.\n  **/\ndiff --git a/lib/eal/windows/eal_windows.h b/lib/eal/windows/eal_windows.h\nindex 23ead6d3..245aa603 100644\n--- a/lib/eal/windows/eal_windows.h\n+++ b/lib/eal/windows/eal_windows.h\n@@ -63,7 +63,7 @@ unsigned int eal_socket_numa_node(unsigned int socket_id);\n  * @param arg\n  *  Argument to the called function.\n  * @return\n- *  0 on success, netagive error code on failure.\n+ *  0 on success, negative error code on failure.\n  */\n int eal_intr_thread_schedule(void (*func)(void *arg), void *arg);\n \ndiff --git a/lib/eal/windows/include/dirent.h b/lib/eal/windows/include/dirent.h\nindex 869a5983..34eb077f 100644\n--- a/lib/eal/windows/include/dirent.h\n+++ b/lib/eal/windows/include/dirent.h\n@@ -440,7 +440,7 @@ opendir(const char *dirname)\n  * display correctly on console. 
The problem can be fixed in two ways:\n  * (1) change the character set of console to 1252 using chcp utility\n  * and use Lucida Console font, or (2) use _cprintf function when\n- * writing to console. The _cprinf() will re-encode ANSI strings to the\n+ * writing to console. The _cprintf() will re-encode ANSI strings to the\n  * console code page so many non-ASCII characters will display correctly.\n  */\n static struct dirent*\n@@ -579,7 +579,7 @@ dirent_mbstowcs_s(\n \t\t\twcstr[n] = 0;\n \t\t}\n \n-\t\t/* Length of resuting multi-byte string WITH zero\n+\t\t/* Length of resulting multi-byte string WITH zero\n \t\t *terminator\n \t\t */\n \t\tif (pReturnValue)\ndiff --git a/lib/eal/windows/include/fnmatch.h b/lib/eal/windows/include/fnmatch.h\nindex c272f65c..c6b226bd 100644\n--- a/lib/eal/windows/include/fnmatch.h\n+++ b/lib/eal/windows/include/fnmatch.h\n@@ -26,14 +26,14 @@ extern \"C\" {\n #define FNM_PREFIX_DIRS 0x20\n \n /**\n- * This function is used for searhing a given string source\n+ * This function is used for searching a given string source\n  * with the given regular expression pattern.\n  *\n  * @param pattern\n  *\tregular expression notation describing the pattern to match\n  *\n  * @param string\n- *\tsource string to searcg for the pattern\n+ *\tsource string to search for the pattern\n  *\n  * @param flag\n  *\tcontaining information about the pattern\ndiff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h\nindex 915afd9d..f2ee1a9c 100644\n--- a/lib/eal/x86/include/rte_atomic.h\n+++ b/lib/eal/x86/include/rte_atomic.h\n@@ -60,7 +60,7 @@ extern \"C\" {\n  * Basic idea is to use lock prefixed add with some dummy memory location\n  * as the destination. From their experiments 128B(2 cache lines) below\n  * current stack pointer looks like a good candidate.\n- * So below we use that techinque for rte_smp_mb() implementation.\n+ * So below we use that technique for rte_smp_mb() implementation.\n  */\n \n static __rte_always_inline void\ndiff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c\nindex 809416d9..3182b52c 100644\n--- a/lib/eventdev/rte_event_eth_rx_adapter.c\n+++ b/lib/eventdev/rte_event_eth_rx_adapter.c\n@@ -3334,7 +3334,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,\n \ttoken = strtok(NULL, \"\\0\");\n \tif (token != NULL)\n \t\tRTE_EDEV_LOG_ERR(\"Extra parameters passed to eventdev\"\n-\t\t\t\t \" telemetry command, igrnoring\");\n+\t\t\t\t \" telemetry command, ignoring\");\n \n \tif (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,\n \t\t\t\t\t\t    rx_queue_id, &queue_conf)) {\n@@ -3398,7 +3398,7 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,\n \ttoken = strtok(NULL, \"\\0\");\n \tif (token != NULL)\n \t\tRTE_EDEV_LOG_ERR(\"Extra parameters passed to eventdev\"\n-\t\t\t\t \" telemetry command, igrnoring\");\n+\t\t\t\t \" telemetry command, ignoring\");\n \n \tif (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,\n \t\t\t\t\t\t    rx_queue_id, &q_stats)) {\n@@ -3460,7 +3460,7 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,\n \ttoken = strtok(NULL, \"\\0\");\n \tif (token != NULL)\n \t\tRTE_EDEV_LOG_ERR(\"Extra parameters passed to eventdev\"\n-\t\t\t\t \" telemetry command, igrnoring\");\n+\t\t\t\t \" telemetry command, ignoring\");\n \n \tif (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,\n \t\t\t\t\t\t       eth_dev_id,\ndiff --git a/lib/fib/rte_fib.c b/lib/fib/rte_fib.c\nindex 6ca180d7..ad0b85bc 
100644\n--- a/lib/fib/rte_fib.c\n+++ b/lib/fib/rte_fib.c\n@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib_tailq)\n struct rte_fib {\n \tchar\t\t\tname[RTE_FIB_NAMESIZE];\n \tenum rte_fib_type\ttype;\t/**< Type of FIB struct */\n-\tstruct rte_rib\t\t*rib;\t/**< RIB helper datastruct */\n+\tstruct rte_rib\t\t*rib;\t/**< RIB helper datastructure */\n \tvoid\t\t\t*dp;\t/**< pointer to the dataplane struct*/\n \trte_fib_lookup_fn_t\tlookup;\t/**< fib lookup function */\n-\trte_fib_modify_fn_t\tmodify; /**< modify fib datastruct */\n+\trte_fib_modify_fn_t\tmodify; /**< modify fib datastructure */\n \tuint64_t\t\tdef_nh;\n };\n \ndiff --git a/lib/fib/rte_fib.h b/lib/fib/rte_fib.h\nindex b3c59dfa..e592d325 100644\n--- a/lib/fib/rte_fib.h\n+++ b/lib/fib/rte_fib.h\n@@ -189,7 +189,7 @@ rte_fib_lookup_bulk(struct rte_fib *fib, uint32_t *ips,\n  *   FIB object handle\n  * @return\n  *   Pointer on the dataplane struct on success\n- *   NULL othervise\n+ *   NULL otherwise\n  */\n void *\n rte_fib_get_dp(struct rte_fib *fib);\n@@ -201,7 +201,7 @@ rte_fib_get_dp(struct rte_fib *fib);\n  *   FIB object handle\n  * @return\n  *   Pointer on the RIB on success\n- *   NULL othervise\n+ *   NULL otherwise\n  */\n struct rte_rib *\n rte_fib_get_rib(struct rte_fib *fib);\ndiff --git a/lib/fib/rte_fib6.c b/lib/fib/rte_fib6.c\nindex be79efe0..4d35ea32 100644\n--- a/lib/fib/rte_fib6.c\n+++ b/lib/fib/rte_fib6.c\n@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib6_tailq)\n struct rte_fib6 {\n \tchar\t\t\tname[FIB6_NAMESIZE];\n \tenum rte_fib6_type\ttype;\t/**< Type of FIB struct */\n-\tstruct rte_rib6\t\t*rib;\t/**< RIB helper datastruct */\n+\tstruct rte_rib6\t\t*rib;\t/**< RIB helper datastructure */\n \tvoid\t\t\t*dp;\t/**< pointer to the dataplane struct*/\n \trte_fib6_lookup_fn_t\tlookup;\t/**< fib lookup function */\n-\trte_fib6_modify_fn_t\tmodify; /**< modify fib datastruct */\n+\trte_fib6_modify_fn_t\tmodify; /**< modify fib datastructure */\n \tuint64_t\t\tdef_nh;\n };\n \ndiff --git a/lib/fib/rte_fib6.h b/lib/fib/rte_fib6.h\nindex 95879af9..cb133719 100644\n--- a/lib/fib/rte_fib6.h\n+++ b/lib/fib/rte_fib6.h\n@@ -184,7 +184,7 @@ rte_fib6_lookup_bulk(struct rte_fib6 *fib,\n  *   FIB6 object handle\n  * @return\n  *   Pointer on the dataplane struct on success\n- *   NULL othervise\n+ *   NULL otherwise\n  */\n void *\n rte_fib6_get_dp(struct rte_fib6 *fib);\n@@ -196,7 +196,7 @@ rte_fib6_get_dp(struct rte_fib6 *fib);\n  *   FIB object handle\n  * @return\n  *   Pointer on the RIB6 on success\n- *   NULL othervise\n+ *   NULL otherwise\n  */\n struct rte_rib6 *\n rte_fib6_get_rib(struct rte_fib6 *fib);\ndiff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c\nindex 093512ef..62d2d69c 100644\n--- a/lib/graph/graph_populate.c\n+++ b/lib/graph/graph_populate.c\n@@ -46,7 +46,7 @@ graph_fp_mem_calc_size(struct graph *graph)\n }\n \n static void\n-graph_header_popluate(struct graph *_graph)\n+graph_header_populate(struct graph *_graph)\n {\n \tstruct rte_graph *graph = _graph->graph;\n \n@@ -184,7 +184,7 @@ graph_fp_mem_populate(struct graph *graph)\n {\n \tint rc;\n \n-\tgraph_header_popluate(graph);\n+\tgraph_header_populate(graph);\n \tgraph_nodes_populate(graph);\n \trc = graph_node_nexts_populate(graph);\n \trc |= graph_src_nodes_populate(graph);\ndiff --git a/lib/hash/rte_crc_arm64.h b/lib/hash/rte_crc_arm64.h\nindex b4628cfc..6995b414 100644\n--- a/lib/hash/rte_crc_arm64.h\n+++ b/lib/hash/rte_crc_arm64.h\n@@ -61,7 +61,7 @@ crc32c_arm64_u64(uint64_t data, uint32_t init_val)\n }\n \n /**\n- * Allow or 
disallow use of arm64 SIMD instrinsics for CRC32 hash\n+ * Allow or disallow use of arm64 SIMD intrinsics for CRC32 hash\n  * calculation.\n  *\n  * @param alg\ndiff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c\nindex 6847e36f..e27ac8ac 100644\n--- a/lib/hash/rte_thash.c\n+++ b/lib/hash/rte_thash.c\n@@ -27,7 +27,7 @@ static struct rte_tailq_elem rte_thash_tailq = {\n EAL_REGISTER_TAILQ(rte_thash_tailq)\n \n /**\n- * Table of some irreducible polinomials over GF(2).\n+ * Table of some irreducible polynomials over GF(2).\n  * For lfsr they are represented in BE bit order, and\n  * x^0 is masked out.\n  * For example, poly x^5 + x^2 + 1 will be represented\ndiff --git a/lib/ip_frag/ip_frag_internal.c b/lib/ip_frag/ip_frag_internal.c\nindex b436a4c9..01849284 100644\n--- a/lib/ip_frag/ip_frag_internal.c\n+++ b/lib/ip_frag/ip_frag_internal.c\n@@ -172,7 +172,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,\n \t\t\tmb = ipv6_frag_reassemble(fp);\n \t}\n \n-\t/* errorenous set of fragments. */\n+\t/* erroneous set of fragments. */\n \tif (mb == NULL) {\n \n \t\t/* report an error. */\ndiff --git a/lib/ipsec/ipsec_sad.c b/lib/ipsec/ipsec_sad.c\nindex 531e1e32..8548e2cf 100644\n--- a/lib/ipsec/ipsec_sad.c\n+++ b/lib/ipsec/ipsec_sad.c\n@@ -69,14 +69,14 @@ add_specific(struct rte_ipsec_sad *sad, const void *key,\n \t\tint key_type, void *sa)\n {\n \tvoid *tmp_val;\n-\tint ret, notexist;\n+\tint ret, nonexistent;\n \n \t/* Check if the key is present in the table.\n-\t * Need for further accaunting in cnt_arr\n+\t * Need for further accounting in cnt_arr\n \t */\n \tret = rte_hash_lookup_with_hash(sad->hash[key_type], key,\n \t\trte_hash_crc(key, sad->keysize[key_type], sad->init_val));\n-\tnotexist = (ret == -ENOENT);\n+\tnonexistent = (ret == -ENOENT);\n \n \t/* Add an SA to the corresponding table.*/\n \tret = rte_hash_add_key_with_hash_data(sad->hash[key_type], key,\n@@ -107,9 +107,9 @@ add_specific(struct rte_ipsec_sad *sad, const void *key,\n \tif (ret < 0)\n \t\treturn ret;\n \tif (key_type == RTE_IPSEC_SAD_SPI_DIP)\n-\t\tsad->cnt_arr[ret].cnt_dip += notexist;\n+\t\tsad->cnt_arr[ret].cnt_dip += nonexistent;\n \telse\n-\t\tsad->cnt_arr[ret].cnt_dip_sip += notexist;\n+\t\tsad->cnt_arr[ret].cnt_dip_sip += nonexistent;\n \n \treturn 0;\n }\ndiff --git a/lib/ipsec/ipsec_telemetry.c b/lib/ipsec/ipsec_telemetry.c\nindex b8b08404..9a91e471 100644\n--- a/lib/ipsec/ipsec_telemetry.c\n+++ b/lib/ipsec/ipsec_telemetry.c\n@@ -236,7 +236,7 @@ RTE_INIT(rte_ipsec_telemetry_init)\n \t\t\"Return list of IPsec SAs with telemetry enabled.\");\n \trte_telemetry_register_cmd(\"/ipsec/sa/stats\",\n \t\thandle_telemetry_cmd_ipsec_sa_stats,\n-\t\t\"Returns IPsec SA stastistics. Parameters: int sa_spi\");\n+\t\t\"Returns IPsec SA statistics. Parameters: int sa_spi\");\n \trte_telemetry_register_cmd(\"/ipsec/sa/details\",\n \t\thandle_telemetry_cmd_ipsec_sa_details,\n \t\t\"Returns IPsec SA configuration. 
Parameters: int sa_spi\");\ndiff --git a/lib/ipsec/rte_ipsec_sad.h b/lib/ipsec/rte_ipsec_sad.h\nindex b65d2958..a3ae57df 100644\n--- a/lib/ipsec/rte_ipsec_sad.h\n+++ b/lib/ipsec/rte_ipsec_sad.h\n@@ -153,7 +153,7 @@ rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad);\n  * @param keys\n  *   Array of keys to be looked up in the SAD\n  * @param sa\n- *   Pointer assocoated with the keys.\n+ *   Pointer associated with the keys.\n  *   If the lookup for the given key failed, then corresponding sa\n  *   will be NULL\n  * @param n\ndiff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c\nindex 1e51482c..cdb70af0 100644\n--- a/lib/ipsec/sa.c\n+++ b/lib/ipsec/sa.c\n@@ -362,7 +362,7 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)\n \n \tmemcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);\n \n-\t/* insert UDP header if UDP encapsulation is inabled */\n+\t/* insert UDP header if UDP encapsulation is enabled */\n \tif (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {\n \t\tstruct rte_udp_hdr *udph = (struct rte_udp_hdr *)\n \t\t\t\t&sa->hdr[prm->tun.hdr_len];\ndiff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h\nindex 321a419c..3d6ddd67 100644\n--- a/lib/mbuf/rte_mbuf_core.h\n+++ b/lib/mbuf/rte_mbuf_core.h\n@@ -8,7 +8,7 @@\n \n /**\n  * @file\n- * This file contains definion of RTE mbuf structure itself,\n+ * This file contains definition of RTE mbuf structure itself,\n  * packet offload flags and some related macros.\n  * For majority of DPDK entities, it is not recommended to include\n  * this file directly, use include <rte_mbuf.h> instead.\ndiff --git a/lib/meson.build b/lib/meson.build\nindex 018976df..fbaa6ef7 100644\n--- a/lib/meson.build\n+++ b/lib/meson.build\n@@ -3,7 +3,7 @@\n \n \n # process all libraries equally, as far as possible\n-# \"core\" libs first, then others alphebetically as far as possible\n+# \"core\" libs first, then others alphabetically as far as possible\n # NOTE: for speed of meson runs, the dependencies in the subdirectories\n # sometimes skip deps that would be implied by others, e.g. if mempool is\n # given as a dep, no need to mention ring. This is especially true for the\ndiff --git a/lib/net/rte_l2tpv2.h b/lib/net/rte_l2tpv2.h\nindex b90e36cf..938a993b 100644\n--- a/lib/net/rte_l2tpv2.h\n+++ b/lib/net/rte_l2tpv2.h\n@@ -143,7 +143,7 @@ struct rte_l2tpv2_msg_without_length {\n /**\n  * L2TPv2 message Header contains all options except ns_nr(length,\n  * offset size, offset padding).\n- * Ns and Nr MUST be toghter.\n+ * Ns and Nr MUST be together.\n  */\n struct rte_l2tpv2_msg_without_ns_nr {\n \trte_be16_t length;\t\t/**< length(16) */\n@@ -155,7 +155,7 @@ struct rte_l2tpv2_msg_without_ns_nr {\n \n /**\n  * L2TPv2 message Header contains all options except ns_nr(length, ns, nr).\n- * offset size and offset padding MUST be toghter.\n+ * offset size and offset padding MUST be together.\n  */\n struct rte_l2tpv2_msg_without_offset {\n \trte_be16_t length;\t\t/**< length(16) */\ndiff --git a/lib/pipeline/rte_swx_ctl.h b/lib/pipeline/rte_swx_ctl.h\nindex 46d05823..82e62e70 100644\n--- a/lib/pipeline/rte_swx_ctl.h\n+++ b/lib/pipeline/rte_swx_ctl.h\n@@ -369,7 +369,7 @@ struct rte_swx_table_stats {\n \tuint64_t n_pkts_miss;\n \n \t/** Number of packets (with either lookup hit or miss) per pipeline\n-\t * action. Array of pipeline *n_actions* elements indedex by the\n+\t * action. 
Array of pipeline *n_actions* elements indexed by the\n \t * pipeline-level *action_id*, therefore this array has the same size\n \t * for all the tables within the same pipeline.\n \t */\n@@ -629,7 +629,7 @@ struct rte_swx_learner_stats {\n \tuint64_t n_pkts_forget;\n \n \t/** Number of packets (with either lookup hit or miss) per pipeline action. Array of\n-\t * pipeline *n_actions* elements indedex by the pipeline-level *action_id*, therefore this\n+\t * pipeline *n_actions* elements indexed by the pipeline-level *action_id*, therefore this\n \t * array has the same size for all the tables within the same pipeline.\n \t */\n \tuint64_t *n_pkts_action;\ndiff --git a/lib/pipeline/rte_swx_pipeline_internal.h b/lib/pipeline/rte_swx_pipeline_internal.h\nindex 1921fdcd..fa944c95 100644\n--- a/lib/pipeline/rte_swx_pipeline_internal.h\n+++ b/lib/pipeline/rte_swx_pipeline_internal.h\n@@ -309,7 +309,7 @@ enum instruction_type {\n \t */\n \tINSTR_ALU_CKADD_FIELD,    /* src = H */\n \tINSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */\n-\tINSTR_ALU_CKADD_STRUCT,   /* src = h.hdeader, with any sizeof(header) */\n+\tINSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */\n \n \t/* cksub dst src\n \t * dst = dst '- src\n@@ -1562,7 +1562,7 @@ emit_handler(struct thread *t)\n \t\treturn;\n \t}\n \n-\t/* Header encapsulation (optionally, with prior header decasulation). */\n+\t/* Header encapsulation (optionally, with prior header decapsulation). */\n \tif ((t->n_headers_out == 2) &&\n \t    (h1->ptr + h1->n_bytes == t->ptr) &&\n \t    (h0->ptr == h0->ptr0)) {\ndiff --git a/lib/pipeline/rte_swx_pipeline_spec.c b/lib/pipeline/rte_swx_pipeline_spec.c\nindex 8e9aa44e..07a7580a 100644\n--- a/lib/pipeline/rte_swx_pipeline_spec.c\n+++ b/lib/pipeline/rte_swx_pipeline_spec.c\n@@ -2011,7 +2011,7 @@ rte_swx_pipeline_build_from_spec(struct rte_swx_pipeline *p,\n \t\tif (err_line)\n \t\t\t*err_line = 0;\n \t\tif (err_msg)\n-\t\t\t*err_msg = \"Null pipeline arument.\";\n+\t\t\t*err_msg = \"Null pipeline argument.\";\n \t\tstatus = -EINVAL;\n \t\tgoto error;\n \t}\ndiff --git a/lib/power/power_cppc_cpufreq.c b/lib/power/power_cppc_cpufreq.c\nindex 6afd310e..25185a79 100644\n--- a/lib/power/power_cppc_cpufreq.c\n+++ b/lib/power/power_cppc_cpufreq.c\n@@ -621,7 +621,7 @@ power_cppc_enable_turbo(unsigned int lcore_id)\n \t\treturn -1;\n \t}\n \n-\t/* TODO: must set to max once enbling Turbo? Considering add condition:\n+\t/* TODO: must set to max once enabling Turbo? 
Considering add condition:\n \t * if ((pi->turbo_available) && (pi->curr_idx <= 1))\n \t */\n \t/* Max may have changed, so call to max function */\ndiff --git a/lib/regexdev/rte_regexdev.h b/lib/regexdev/rte_regexdev.h\nindex 86f0b231..0bac46cd 100644\n--- a/lib/regexdev/rte_regexdev.h\n+++ b/lib/regexdev/rte_regexdev.h\n@@ -298,14 +298,14 @@ rte_regexdev_get_dev_id(const char *name);\n  * backtracking positions remembered by any tokens inside the group.\n  * Example RegEx is `a(?>bc|b)c` if the given patterns are `abc` and `abcc` then\n  * `a(bc|b)c` matches both where as `a(?>bc|b)c` matches only abcc because\n- * atomic groups don't allow backtracing back to `b`.\n+ * atomic groups don't allow backtracking back to `b`.\n  *\n  * @see struct rte_regexdev_info::regexdev_capa\n  */\n \n #define RTE_REGEXDEV_SUPP_PCRE_BACKTRACKING_CTRL_F (1ULL << 3)\n /**< RegEx device support PCRE backtracking control verbs.\n- * Some examples of backtracing verbs are (*COMMIT), (*ACCEPT), (*FAIL),\n+ * Some examples of backtracking verbs are (*COMMIT), (*ACCEPT), (*FAIL),\n  * (*SKIP), (*PRUNE).\n  *\n  * @see struct rte_regexdev_info::regexdev_capa\n@@ -1015,7 +1015,7 @@ rte_regexdev_rule_db_update(uint8_t dev_id,\n  * @b EXPERIMENTAL: this API may change without prior notice.\n  *\n  * Compile local rule set and burn the complied result to the\n- * RegEx deive.\n+ * RegEx device.\n  *\n  * @param dev_id\n  *   RegEx device identifier.\ndiff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h\nindex 46ad584f..1252ca95 100644\n--- a/lib/ring/rte_ring_core.h\n+++ b/lib/ring/rte_ring_core.h\n@@ -12,7 +12,7 @@\n \n /**\n  * @file\n- * This file contains definion of RTE ring structure itself,\n+ * This file contains definition of RTE ring structure itself,\n  * init flags and some related macros.\n  * For majority of DPDK entities, it is not recommended to include\n  * this file directly, use include <rte_ring.h> or <rte_ring_elem.h>\ndiff --git a/lib/sched/rte_pie.h b/lib/sched/rte_pie.h\nindex dfdf5723..02a987f5 100644\n--- a/lib/sched/rte_pie.h\n+++ b/lib/sched/rte_pie.h\n@@ -252,7 +252,7 @@ _rte_pie_drop(const struct rte_pie_config *pie_cfg,\n }\n \n /**\n- * @brief Decides if new packet should be enqeued or dropped for non-empty queue\n+ * @brief Decides if new packet should be enqueued or dropped for non-empty queue\n  *\n  * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n  * @param pie [in,out] data pointer to PIE runtime data\n@@ -319,7 +319,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,\n }\n \n /**\n- * @brief Decides if new packet should be enqeued or dropped\n+ * @brief Decides if new packet should be enqueued or dropped\n  * Updates run time data and gives verdict whether to enqueue or drop the packet.\n  *\n  * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n@@ -330,7 +330,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,\n  *\n  * @return Operation status\n  * @retval 0 enqueue the packet\n- * @retval 1 drop the packet based on drop probility criteria\n+ * @retval 1 drop the packet based on drop probability criteria\n  */\n static inline int\n __rte_experimental\ndiff --git a/lib/sched/rte_red.h b/lib/sched/rte_red.h\nindex 36273cac..f5843dab 100644\n--- a/lib/sched/rte_red.h\n+++ b/lib/sched/rte_red.h\n@@ -303,7 +303,7 @@ __rte_red_drop(const struct rte_red_config *red_cfg, struct rte_red *red)\n }\n \n /**\n- * @brief Decides if new packet should be enqeued or dropped in queue 
non-empty case\n+ * @brief Decides if new packet should be enqueued or dropped in queue non-empty case\n  *\n  * @param red_cfg [in] config pointer to a RED configuration parameter structure\n  * @param red [in,out] data pointer to RED runtime data\n@@ -361,7 +361,7 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,\n }\n \n /**\n- * @brief Decides if new packet should be enqeued or dropped\n+ * @brief Decides if new packet should be enqueued or dropped\n  * Updates run time data based on new queue size value.\n  * Based on new queue average and RED configuration parameters\n  * gives verdict whether to enqueue or drop the packet.\ndiff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c\nindex ed44808f..62b3d2e3 100644\n--- a/lib/sched/rte_sched.c\n+++ b/lib/sched/rte_sched.c\n@@ -239,7 +239,7 @@ struct rte_sched_port {\n \tint socket;\n \n \t/* Timing */\n-\tuint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cyles */\n+\tuint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */\n \tuint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */\n \tuint64_t time;                /* Current NIC TX time measured in bytes */\n \tstruct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */\ndiff --git a/lib/sched/rte_sched.h b/lib/sched/rte_sched.h\nindex 484dbdcc..3c625ba1 100644\n--- a/lib/sched/rte_sched.h\n+++ b/lib/sched/rte_sched.h\n@@ -360,7 +360,7 @@ rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,\n  *\n  * Hierarchical scheduler subport bandwidth profile add\n  * Note that this function is safe to use in runtime for adding new\n- * subport bandwidth profile as it doesn't have any impact on hiearchical\n+ * subport bandwidth profile as it doesn't have any impact on hierarchical\n  * structure of the scheduler.\n  * @param port\n  *   Handle to port scheduler instance\ndiff --git a/lib/table/rte_swx_table.h b/lib/table/rte_swx_table.h\nindex f93e5f3f..c1383c2e 100644\n--- a/lib/table/rte_swx_table.h\n+++ b/lib/table/rte_swx_table.h\n@@ -216,7 +216,7 @@ typedef int\n  * operations into the same table.\n  *\n  * The typical reason an implementation may choose to split the table lookup\n- * operation into multiple steps is to hide the latency of the inherrent memory\n+ * operation into multiple steps is to hide the latency of the inherent memory\n  * read operations: before a read operation with the source data likely not in\n  * the CPU cache, the source data prefetch is issued and the table lookup\n  * operation is postponed in favor of some other unrelated work, which the CPU\ndiff --git a/lib/table/rte_swx_table_selector.h b/lib/table/rte_swx_table_selector.h\nindex 62988d28..05863cc9 100644\n--- a/lib/table/rte_swx_table_selector.h\n+++ b/lib/table/rte_swx_table_selector.h\n@@ -155,7 +155,7 @@ rte_swx_table_selector_group_set(void *table,\n  * mechanism allows for multiple concurrent select operations into the same table.\n  *\n  * The typical reason an implementation may choose to split the operation into multiple steps is to\n- * hide the latency of the inherrent memory read operations: before a read operation with the\n+ * hide the latency of the inherent memory read operations: before a read operation with the\n  * source data likely not in the CPU cache, the source data prefetch is issued and the operation is\n  * postponed in favor of some other unrelated work, which the CPU executes in parallel with the\n  * source data being fetched into the CPU cache; later on, the operation is resumed, this 
time with\ndiff --git a/lib/telemetry/telemetry.c b/lib/telemetry/telemetry.c\nindex a7483167..e5ccfe47 100644\n--- a/lib/telemetry/telemetry.c\n+++ b/lib/telemetry/telemetry.c\n@@ -534,7 +534,7 @@ telemetry_legacy_init(void)\n \t}\n \trc = pthread_create(&t_old, NULL, socket_listener, &v1_socket);\n \tif (rc != 0) {\n-\t\tTMTY_LOG(ERR, \"Error with create legcay socket thread: %s\\n\",\n+\t\tTMTY_LOG(ERR, \"Error with create legacy socket thread: %s\\n\",\n \t\t\t strerror(rc));\n \t\tclose(v1_socket.sock);\n \t\tv1_socket.sock = -1;\ndiff --git a/lib/telemetry/telemetry_json.h b/lib/telemetry/telemetry_json.h\nindex f02a12f5..db706902 100644\n--- a/lib/telemetry/telemetry_json.h\n+++ b/lib/telemetry/telemetry_json.h\n@@ -23,7 +23,7 @@\n /**\n  * @internal\n  * Copies a value into a buffer if the buffer has enough available space.\n- * Nothing written to buffer if an overflow ocurs.\n+ * Nothing written to buffer if an overflow occurs.\n  * This function is not for use for values larger than given buffer length.\n  */\n __rte_format_printf(3, 4)\ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex a781346c..05ef70f6 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -1115,7 +1115,7 @@ vhost_user_postcopy_region_register(struct virtio_net *dev,\n \tstruct uffdio_register reg_struct;\n \n \t/*\n-\t * Let's register all the mmap'ed area to ensure\n+\t * Let's register all the mmapped area to ensure\n \t * alignment on page boundary.\n \t */\n \treg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;\n@@ -1177,7 +1177,7 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,\n \tmsg->fd_num = 0;\n \tsend_vhost_reply(main_fd, msg);\n \n-\t/* Wait for qemu to acknolwedge it's got the addresses\n+\t/* Wait for qemu to acknowledge it's got the addresses\n \t * we've got to wait before we're allowed to generate faults.\n \t */\n \tif (read_vhost_message(main_fd, &ack_msg) <= 0) {\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex b3d954aa..28a4dc1b 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -477,14 +477,14 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \twhile (desc_len) {\n \t\tuint64_t desc_addr;\n-\t\tuint64_t desc_chunck_len = desc_len;\n+\t\tuint64_t desc_chunk_len = desc_len;\n \n \t\tif (unlikely(vec_id >= BUF_VECTOR_MAX))\n \t\t\treturn -1;\n \n \t\tdesc_addr = vhost_iova_to_vva(dev, vq,\n \t\t\t\tdesc_iova,\n-\t\t\t\t&desc_chunck_len,\n+\t\t\t\t&desc_chunk_len,\n \t\t\t\tperm);\n \t\tif (unlikely(!desc_addr))\n \t\t\treturn -1;\n@@ -493,10 +493,10 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \t\tbuf_vec[vec_id].buf_iova = desc_iova;\n \t\tbuf_vec[vec_id].buf_addr = desc_addr;\n-\t\tbuf_vec[vec_id].buf_len  = desc_chunck_len;\n+\t\tbuf_vec[vec_id].buf_len  = desc_chunk_len;\n \n-\t\tdesc_len -= desc_chunck_len;\n-\t\tdesc_iova += desc_chunck_len;\n+\t\tdesc_len -= desc_chunk_len;\n+\t\tdesc_iova += desc_chunk_len;\n \t\tvec_id++;\n \t}\n \t*vec_idx = vec_id;\n",
    "prefixes": []
}