get:
Show a patch.

patch:
Partially update a patch (only the fields provided are modified).

put:
Update a patch.

GET /api/patches/41483/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41483,
    "url": "http://patchwork.dpdk.org/api/patches/41483/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/064ea16d773eef551c174f27f0a331d1871c91fc.1529940601.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<064ea16d773eef551c174f27f0a331d1871c91fc.1529940601.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/064ea16d773eef551c174f27f0a331d1871c91fc.1529940601.git.anatoly.burakov@intel.com",
    "date": "2018-06-25T15:59:44",
    "name": "[RFC,7/9] usertools/lib: add hugepage information library",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8b0dc68057eb9097ca48cef22247a61a57f4ca53",
    "submitter": {
        "id": 4,
        "url": "http://patchwork.dpdk.org/api/people/4/?format=api",
        "name": "Anatoly Burakov",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/064ea16d773eef551c174f27f0a331d1871c91fc.1529940601.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 225,
            "url": "http://patchwork.dpdk.org/api/series/225/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=225",
            "date": "2018-06-25T15:59:39",
            "name": "Modularize and enhance DPDK Python scripts",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/225/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/41483/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/41483/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1CFAE5398;\n\tMon, 25 Jun 2018 18:00:03 +0200 (CEST)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n\tby dpdk.org (Postfix) with ESMTP id 2EA234CA1\n\tfor <dev@dpdk.org>; Mon, 25 Jun 2018 17:59:51 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t25 Jun 2018 08:59:49 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby FMSMGA003.fm.intel.com with ESMTP; 25 Jun 2018 08:59:48 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tw5PFxlQ3032518; Mon, 25 Jun 2018 16:59:47 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id w5PFxlZa026634;\n\tMon, 25 Jun 2018 16:59:47 +0100",
            "(from aburakov@localhost)\n\tby sivswdev01.ir.intel.com with LOCAL id w5PFxlWo026630;\n\tMon, 25 Jun 2018 16:59:47 +0100"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,270,1526367600\"; d=\"scan'208\";a=\"60036798\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "john.mcnamara@intel.com, bruce.richardson@intel.com,\n\tpablo.de.lara.guarch@intel.com, david.hunt@intel.com,\n\tmohammad.abdul.awal@intel.com",
        "Date": "Mon, 25 Jun 2018 16:59:44 +0100",
        "Message-Id": "<064ea16d773eef551c174f27f0a331d1871c91fc.1529940601.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": [
            "<cover.1529940601.git.anatoly.burakov@intel.com>",
            "<cover.1529940601.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<cover.1529940601.git.anatoly.burakov@intel.com>",
            "<cover.1529940601.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [RFC 7/9] usertools/lib: add hugepage information library",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add a library for getting hugepage information on Linux system.\n\nSupported functionality:\n\n- List active hugetlbfs mountpoints\n- Change hugetlbfs mountpoints\n  - Supports both transient and persistent (fstab) mountpoints\n- Display/change number of allocated hugepages\n  - Supports both total and per-NUMA node page counts\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n usertools/DPDKConfigLib/HugeUtil.py | 309 ++++++++++++++++++++++++++++\n usertools/DPDKConfigLib/Util.py     |  49 +++++\n 2 files changed, 358 insertions(+)\n create mode 100755 usertools/DPDKConfigLib/HugeUtil.py",
    "diff": "diff --git a/usertools/DPDKConfigLib/HugeUtil.py b/usertools/DPDKConfigLib/HugeUtil.py\nnew file mode 100755\nindex 000000000..79ed97bb7\n--- /dev/null\n+++ b/usertools/DPDKConfigLib/HugeUtil.py\n@@ -0,0 +1,309 @@\n+#!/usr/bin/env python\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2018 Intel Corporation\n+\n+\n+from .PlatformInfo import *\n+from .Util import *\n+import re\n+import os\n+import subprocess\n+\n+__KERNEL_NUMA_HP_PATH = \\\n+    \"/sys/devices/system/node/node%i/hugepages/hugepages-%ikB/\"\n+__KERNEL_HP_PATH = \"/sys/kernel/mm/hugepages/hugepages-%ikB/\"\n+__NR_HP_FNAME = \"nr_hugepages\"\n+# check if we have systemd\n+_have_systemd = run([\"which\", \"systemctl\"])\n+\n+# local copy of platform info\n+info = PlatformInfo()\n+\n+\n+def _find_runtime_hugetlbfs_mountpoints():\n+    mountpoints = {}\n+    with open(\"/proc/mounts\") as f:\n+        for line in f:\n+            if not _is_hugetlbfs_mount(line):\n+                continue\n+            line = line.strip()\n+            _, path, _, options, _, _ = line.split()\n+\n+            m = re.search(r\"pagesize=(\\d+\\w)\", options)\n+            if m:\n+                pagesz = human_readable_to_kilobytes(m.group(1))\n+            else:\n+                # if no size specified, assume default hugepage size\n+                pagesz = info.default_hugepage_size\n+            if pagesz in mountpoints:\n+                raise RuntimeError(\"Multiple mountpoints for same hugetlbfs\")\n+            mountpoints[pagesz] = path\n+    return mountpoints\n+\n+\n+def _find_nr_hugepages(page_sz, node=None):\n+    if node is not None:\n+        path = os.path.join(__KERNEL_NUMA_HP_PATH % (node, page_sz),\n+                            __NR_HP_FNAME)\n+    else:\n+        path = os.path.join(__KERNEL_HP_PATH % (page_sz), __NR_HP_FNAME)\n+    return int(read_file(path))\n+\n+\n+def _write_nr_hugepages(page_sz, nr_pages, node=None):\n+    if node is not None:\n+        path = 
os.path.join(__KERNEL_NUMA_HP_PATH % (node, page_sz),\n+                            __NR_HP_FNAME)\n+    else:\n+        path = os.path.join(__KERNEL_HP_PATH % (page_sz), __NR_HP_FNAME)\n+    write_file(path, str(nr_pages))\n+\n+\n+def _is_hugetlbfs_mount(line):\n+    # ignore comemnts\n+    if line.strip().startswith(\"#\"):\n+        return False\n+    tokens = line.split()\n+    if len(tokens) != 6:\n+        return False\n+    return tokens[2] == \"hugetlbfs\"\n+\n+\n+def _update_fstab_hugetlbfs_mounts(mountpoints):\n+    # remove all hugetlbfs mappings\n+    with open(\"/etc/fstab\") as f:\n+        lines = f.readlines()\n+    mount_idxs = [idx for idx, line in enumerate(lines)\n+                  if _is_hugetlbfs_mount(line)]\n+\n+    # delete all lines with hugetlbfs mountpoints\n+    for idx in reversed(sorted(mount_idxs)):\n+        del lines[idx]\n+\n+    # append new mountpoints\n+    lines.extend([\"hugetlbfs %s hugetlbfs pagesize=%s 0 0\\n\" %\n+                  (mountpoints[size], kilobytes_to_human_readable(size))\n+                  for size in mountpoints.keys() if mountpoints[size] != \"\"])\n+\n+    # finally, write everything back\n+    with open(\"/etc/fstab\", \"w\") as f:\n+        f.writelines(lines)\n+\n+\n+def _find_fstab_hugetlbfs_mounts():\n+    mountpoints = {}\n+    with open(\"/etc/fstab\") as f:\n+        for line in f:\n+            if not _is_hugetlbfs_mount(line):\n+                continue\n+            line = line.strip()\n+            _, path, _, options, _, _ = line.split()\n+\n+            m = re.search(r\"pagesize=(\\d+\\w)\", options)\n+            if m:\n+                pagesz = human_readable_to_kilobytes(m.group(1))\n+            else:\n+                # if no size specified, assume default hugepage size\n+                pagesz = info.default_hugepage_size\n+            if pagesz in mountpoints:\n+                raise RuntimeError(\"Multiple mountpoints for same hugetlbfs\")\n+            mountpoints[pagesz] = 
path\n+    return mountpoints\n+\n+\n+def _find_systemd_hugetlbfs_mounts():\n+    # we find systemd mounts by virtue of them not being in fstab, so check each\n+    units = []\n+    out = subprocess.check_output([\"systemctl\", \"-t\", \"mount\", \"--all\"],\n+                                  stderr=None)\n+    lines = out.decode(\"utf-8\").splitlines()\n+    for line in lines:\n+        line = line.strip()\n+\n+        tokens = line.split()\n+\n+        if len(tokens) == 0:\n+            continue\n+\n+        # masked unit files are second token\n+        if tokens[0].endswith(\".mount\"):\n+            unit = tokens[0]\n+        elif tokens[1].endswith(\".mount\"):\n+            tokens = tokens[1:]\n+            unit = tokens[0]\n+        else:\n+            continue  # not a unit line\n+\n+        # if this is inactive and masked, we don't care\n+        load, active, sub = tokens[1:4]\n+        if load == \"masked\" and active == \"inactive\":\n+            continue\n+\n+        units.append({\"unit\": unit, \"load\": load, \"active\": active, \"sub\": sub})\n+\n+    for unit_dict in units:\n+        # status may return non-zero, but we don't care\n+        try:\n+            out = subprocess.check_output([\"systemctl\", \"status\",\n+                                           unit_dict[\"unit\"]], stderr=None)\n+        except subprocess.CalledProcessError as e:\n+            out = e.output\n+        lines = out.decode(\"utf-8\").splitlines()\n+        for line in lines:\n+            line = line.strip()\n+            if line.startswith(\"What\"):\n+                unit_dict[\"fs\"] = line.split()[1]\n+            elif line.startswith(\"Where\"):\n+                unit_dict[\"path\"] = line.split()[1]\n+\n+    fstab_mountpoints = _find_fstab_hugetlbfs_mounts().values()\n+    filter_func = (lambda x: x.get(\"fs\", \"\") == \"hugetlbfs\" and\n+                             x.get(\"path\", \"\") not in fstab_mountpoints)\n+    return {u[\"unit\"]: u[\"path\"] for 
u in filter(filter_func, units)}\n+\n+\n+def _disable_systemd_hugetlbfs_mounts():\n+    mounts = _find_systemd_hugetlbfs_mounts()\n+    for unit, path in mounts.keys():\n+        run([\"systemctl\", \"stop\", unit])  # unmount\n+        run([\"systemctl\", \"mask\", unit])  # prevent this from ever running\n+\n+\n+class PersistentMountpointConfig:\n+    def __init__(self):\n+        self.update()\n+\n+    def update(self):\n+        self.reset()\n+        self.mountpoints = _find_fstab_hugetlbfs_mounts()\n+        for sz in info.hugepage_sizes_enabled:\n+            self.mountpoints.setdefault(sz, \"\")\n+\n+    def commit(self):\n+        # check if we are trying to mount hugetlbfs of unsupported size\n+        supported = set(info.hugepage_sizes_supported)\n+        all_sizes = set(self.mountpoints.keys())\n+        if not all_sizes.issubset(supported):\n+            diff = supported.difference(all_sizes)\n+            raise ValueError(\"Unsupported hugepage sizes: %s\" %\n+                             [kilobytes_to_human_readable(s) for s in diff])\n+\n+        if _have_systemd:\n+            # dealing with fstab is easier, so disable all systemd mounts\n+            _disable_systemd_hugetlbfs_mounts()\n+\n+        _update_fstab_hugetlbfs_mounts(self.mountpoints)\n+\n+        if _have_systemd:\n+            run([\"systemctl\", \"daemon-reload\"])\n+        self.update()\n+\n+    def reset(self):\n+        self.mountpoints = {}  # pagesz : path\n+\n+\n+class RuntimeMountpointConfig:\n+    def __init__(self):\n+        self.update()\n+\n+    def update(self):\n+        self.reset()\n+        self.mountpoints = _find_runtime_hugetlbfs_mountpoints()\n+        for sz in info.hugepage_sizes_enabled:\n+            self.mountpoints.setdefault(sz, \"\")\n+\n+    def commit(self):\n+        # check if we are trying to mount hugetlbfs of unsupported size\n+        supported = set(info.hugepage_sizes_supported)\n+        all_sizes = set(self.mountpoints.keys())\n+        if 
not all_sizes.issubset(supported):\n+            diff = supported.difference(all_sizes)\n+            raise ValueError(\"Unsupported hugepage sizes: %s\" %\n+                             [kilobytes_to_human_readable(s) for s in diff])\n+\n+        cur_mp = _find_runtime_hugetlbfs_mountpoints()\n+        sizes = set(cur_mp.keys()).union(self.mountpoints.keys())\n+\n+        for size in sizes:\n+            old = cur_mp.get(size, \"\")\n+            new = self.mountpoints.get(size, \"\")\n+\n+            is_unmount = old != \"\" and new == \"\"\n+            is_mount = old == \"\" and new != \"\"\n+            is_remount = old != \"\" and new != \"\" and old != new\n+\n+            mount_param = [\"-t\", \"hugetlbfs\", \"-o\",\n+                           \"pagesize=%sM\" % (size / 1024)]\n+\n+            if is_unmount:\n+                run([\"umount\", old])\n+            elif is_mount:\n+                mkpath(new)\n+                run([\"mount\"] + mount_param + [new])\n+            elif is_remount:\n+                mkpath(new)\n+                run([\"umount\", old])\n+                run([\"mount\"] + mount_param + [new])\n+\n+        if _have_systemd:\n+            run([\"systemctl\", \"daemon-reload\"])\n+        self.update()\n+\n+    def reset(self):\n+        self.mountpoints = {}  # pagesz : path\n+\n+\n+class RuntimeHugepageConfig:\n+    def __init__(self):\n+        self.update()\n+\n+    def update(self):\n+        self.reset()\n+\n+        hugepage_sizes = info.hugepage_sizes_enabled\n+        if len(hugepage_sizes) == 0:\n+            raise RuntimeError(\"Hugepages appear to be disabled\")\n+        self.total_nr_hugepages = \\\n+            {page_sz: _find_nr_hugepages(page_sz)\n+             for page_sz in hugepage_sizes}\n+        for node in info.numa_nodes:\n+            for page_sz in hugepage_sizes:\n+                self.hugepages_per_node[node, page_sz] = \\\n+                    _find_nr_hugepages(page_sz, node)\n+\n+    def 
commit(self):\n+        # sanity checks\n+\n+        # check if user has messed with hugepage sizes\n+        supported_sizes = set(info.hugepage_sizes_supported)\n+        keys = self.total_nr_hugepages.keys()\n+        if set(keys) != set(supported_sizes):\n+            diff = supported_sizes.difference(keys)\n+            raise ValueError(\"Missing hugepage sizes: %s\" %\n+                             [kilobytes_to_human_readable(s) for s in diff])\n+\n+        for d in self.hugepages_per_node:\n+            keys = d.keys()\n+            if set(keys) != set(supported_sizes):\n+                diff = supported_sizes.difference(keys)\n+                raise ValueError(\"Missing hugepage sizes: %s\" %\n+                                 [kilobytes_to_human_readable(s) for s in diff])\n+\n+        # check if all hugepage numbers add up\n+        for size in supported_sizes:\n+            total_hps = sum([self.hugepages_per_node[node, size]\n+                             for node in info.numa_nodes])\n+            if total_hps != self.total_nr_hugepages[size]:\n+                raise ValueError(\"Total number of hugepages not equal to sum of\"\n+                                 \"pages on all NUMA nodes\")\n+\n+        # now, commit our configuration\n+        for size, value in self.total_nr_hugepages.items():\n+            _write_nr_hugepages(size, value)\n+        for node, size, value in self.hugepages_per_node.items():\n+            _write_nr_hugepages(size, value, node)\n+        self.update()\n+\n+    def reset(self):\n+        self.total_nr_hugepages = {}\n+        self.hugepages_per_node = {}\ndiff --git a/usertools/DPDKConfigLib/Util.py b/usertools/DPDKConfigLib/Util.py\nindex eb21cce15..ba0c36537 100755\n--- a/usertools/DPDKConfigLib/Util.py\n+++ b/usertools/DPDKConfigLib/Util.py\n@@ -2,6 +2,25 @@\n # SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2018 Intel Corporation\n \n+import subprocess\n+import re\n+import os\n+import errno\n+\n+__PGSZ_UNITS 
= ['k', 'M', 'G', 'T', 'P']\n+\n+\n+# equivalent to mkdir -p\n+def mkpath(path):\n+    try:\n+        os.makedirs(path)\n+    except OSError as e:\n+        if e.errno == errno.EEXIST:\n+            pass\n+        else:\n+            raise e\n+\n+\n # read entire file and return the result\n def read_file(path):\n     with open(path, 'r') as f:\n@@ -21,6 +40,36 @@ def append_file(path, value):\n         f.write(value)\n \n \n+# run command while suppressing its output\n+def run(args):\n+    try:\n+        subprocess.check_output(args, stderr=None)\n+    except subprocess.CalledProcessError:\n+        return False\n+    return True\n+\n+\n+def kilobytes_to_human_readable(value):\n+    for unit in __PGSZ_UNITS:\n+        if abs(value) < 1024:\n+            cur_unit = unit\n+            break\n+        value /= 1024\n+    else:\n+        raise ValueError(\"Value too large\")\n+    return \"%i%s\" % (value, cur_unit)\n+\n+\n+def human_readable_to_kilobytes(value):\n+    m = re.match(r\"(\\d+)([%s])$\" % ''.join(__PGSZ_UNITS), value)\n+    if not m:\n+        raise ValueError(\"Invalid value format: %s\" % value)\n+    ival = int(m.group(1))\n+    suffix = m.group(2)\n+    pow = __PGSZ_UNITS.index(suffix)\n+    return ival * (1024 ** pow)\n+\n+\n # split line into key-value pair, cleaning up the values in the process\n def kv_split(line, separator):\n     # just in case\n",
    "prefixes": [
        "RFC",
        "7/9"
    ]
}