@@ -59,7 +59,6 @@
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_malloc.h>
-#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include <rte_memory.h>
@@ -121,29 +120,6 @@ priv_rx_intr_vec_enable(struct priv *priv);
static void
priv_rx_intr_vec_disable(struct priv *priv);
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- */
-void priv_lock(struct priv *priv)
-{
- rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- * Pointer to private structure.
- */
-void priv_unlock(struct priv *priv)
-{
- rte_spinlock_unlock(&priv->lock);
-}
-
/* Allocate a buffer on the stack and fill it with a printf format string. */
#define MKSTR(name, ...) \
char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
@@ -570,13 +546,7 @@ dev_configure(struct rte_eth_dev *dev)
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
- priv_lock(priv);
- ret = dev_configure(dev);
- priv_unlock(priv);
- return ret;
+ return dev_configure(dev);
}
static uint16_t mlx4_tx_burst(void *, struct rte_mbuf **, uint16_t);
@@ -1328,14 +1298,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct txq *txq = (*priv->txqs)[idx];
int ret;
- priv_lock(priv);
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
if (idx >= priv->txqs_n) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
return -rte_errno;
}
if (txq != NULL) {
@@ -1343,7 +1311,6 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, (void *)txq);
if (priv->started) {
rte_errno = EEXIST;
- priv_unlock(priv);
return -rte_errno;
}
(*priv->txqs)[idx] = NULL;
@@ -1354,7 +1321,6 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOMEM;
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
- priv_unlock(priv);
return -rte_errno;
}
}
@@ -1369,7 +1335,6 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* Update send callback. */
dev->tx_pkt_burst = mlx4_tx_burst;
}
- priv_unlock(priv);
return ret;
}
@@ -1389,7 +1354,6 @@ mlx4_tx_queue_release(void *dpdk_txq)
if (txq == NULL)
return;
priv = txq->priv;
- priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
DEBUG("%p: removing TX queue %p from list",
@@ -1399,7 +1363,6 @@ mlx4_tx_queue_release(void *dpdk_txq)
}
txq_cleanup(txq);
rte_free(txq);
- priv_unlock(priv);
}
/* RX queues handling. */
@@ -2029,14 +1992,12 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct rxq *rxq = (*priv->rxqs)[idx];
int ret;
- priv_lock(priv);
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
if (idx >= priv->rxqs_n) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->rxqs_n);
- priv_unlock(priv);
return -rte_errno;
}
if (rxq != NULL) {
@@ -2044,7 +2005,6 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, (void *)rxq);
if (priv->started) {
rte_errno = EEXIST;
- priv_unlock(priv);
return -rte_errno;
}
(*priv->rxqs)[idx] = NULL;
@@ -2057,7 +2017,6 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOMEM;
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
- priv_unlock(priv);
return -rte_errno;
}
}
@@ -2072,7 +2031,6 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* Update receive callback. */
dev->rx_pkt_burst = mlx4_rx_burst;
}
- priv_unlock(priv);
return ret;
}
@@ -2092,7 +2050,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
if (rxq == NULL)
return;
priv = rxq->priv;
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
@@ -2104,7 +2061,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
}
rxq_cleanup(rxq);
rte_free(rxq);
- priv_unlock(priv);
}
static int
@@ -2133,11 +2089,8 @@ mlx4_dev_start(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int ret;
- priv_lock(priv);
- if (priv->started) {
- priv_unlock(priv);
+ if (priv->started)
return 0;
- }
DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
priv->started = 1;
ret = priv_mac_addr_add(priv);
@@ -2167,13 +2120,11 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev, strerror(ret));
goto err;
}
- priv_unlock(priv);
return 0;
err:
/* Rollback. */
priv_mac_addr_del(priv);
priv->started = 0;
- priv_unlock(priv);
return ret;
}
@@ -2190,16 +2141,12 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
- if (!priv->started) {
- priv_unlock(priv);
+ if (!priv->started)
return;
- }
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
mlx4_priv_flow_stop(priv);
priv_mac_addr_del(priv);
- priv_unlock(priv);
}
/**
@@ -2279,7 +2226,6 @@ mlx4_dev_close(struct rte_eth_dev *dev)
if (priv == NULL)
return;
- priv_lock(priv);
DEBUG("%p: closing device \"%s\"",
(void *)dev,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
@@ -2328,7 +2274,6 @@ mlx4_dev_close(struct rte_eth_dev *dev)
priv_dev_removal_interrupt_handler_uninstall(priv, dev);
priv_dev_link_interrupt_handler_uninstall(priv, dev);
priv_rx_intr_vec_disable(priv);
- priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -2377,12 +2322,8 @@ static int
mlx4_set_link_down(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- int err;
- priv_lock(priv);
- err = priv_set_link(priv, 0);
- priv_unlock(priv);
- return err;
+ return priv_set_link(priv, 0);
}
/**
@@ -2398,12 +2339,8 @@ static int
mlx4_set_link_up(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- int err;
- priv_lock(priv);
- err = priv_set_link(priv, 1);
- priv_unlock(priv);
- return err;
+ return priv_set_link(priv, 1);
}
/**
@@ -2424,7 +2361,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (priv == NULL)
return;
- priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -2451,7 +2387,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
ETH_LINK_SPEED_20G |
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_56G;
- priv_unlock(priv);
}
/**
@@ -2472,7 +2407,6 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (priv == NULL)
return;
- priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
@@ -2507,7 +2441,6 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
tmp.oerrors += txq->stats.odropped;
}
*stats = tmp;
- priv_unlock(priv);
}
/**
@@ -2525,7 +2458,6 @@ mlx4_stats_reset(struct rte_eth_dev *dev)
if (priv == NULL)
return;
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
continue;
@@ -2540,7 +2472,6 @@ mlx4_stats_reset(struct rte_eth_dev *dev)
(*priv->txqs)[i]->stats =
(struct mlx4_txq_stats){ .idx = idx };
}
- priv_unlock(priv);
}
/**
@@ -2565,7 +2496,6 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
struct rte_eth_link dev_link;
int link_speed = 0;
- /* priv_lock() is not taken to allow concurrent calls. */
if (priv == NULL) {
rte_errno = EINVAL;
return -rte_errno;
@@ -2614,7 +2544,6 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct priv *priv = dev->data->dev_private;
int ret = 0;
- priv_lock(priv);
/* Set kernel interface MTU first. */
if (priv_set_mtu(priv, mtu)) {
ret = rte_errno;
@@ -2625,7 +2554,6 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
priv->mtu = mtu;
out:
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -2652,7 +2580,6 @@ mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = rte_errno;
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
@@ -2671,7 +2598,6 @@ mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_NONE;
ret = 0;
out:
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -2709,7 +2635,6 @@ mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = rte_errno;
WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
@@ -2719,7 +2644,6 @@ mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
ret = 0;
out:
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -2945,11 +2869,9 @@ mlx4_dev_link_status_handler(void *arg)
uint32_t events;
int ret;
- priv_lock(priv);
assert(priv->pending_alarm == 1);
priv->pending_alarm = 0;
ret = priv_dev_status_handler(priv, dev, &events);
- priv_unlock(priv);
if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
NULL, NULL);
@@ -2972,9 +2894,7 @@ mlx4_dev_interrupt_handler(void *cb_arg)
uint32_t ev;
int i;
- priv_lock(priv);
ret = priv_dev_status_handler(priv, dev, &ev);
- priv_unlock(priv);
if (ret > 0) {
for (i = RTE_ETH_EVENT_UNKNOWN;
i < RTE_ETH_EVENT_MAX;
@@ -223,10 +223,6 @@ struct priv {
struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
LIST_HEAD(mlx4_flows, rte_flow) flows;
struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
- rte_spinlock_t lock; /* Lock for control functions. */
};
-void priv_lock(struct priv *priv);
-void priv_unlock(struct priv *priv);
-
#endif /* RTE_PMD_MLX4_H_ */
@@ -703,13 +703,9 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- int ret;
struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
- priv_unlock(priv);
- return ret;
+ return priv_flow_validate(priv, attr, items, actions, error, &flow);
}
/**
@@ -936,13 +932,11 @@ mlx4_flow_create(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
- priv_lock(priv);
flow = priv_flow_create(priv, attr, items, actions, error);
if (flow) {
LIST_INSERT_HEAD(&priv->flows, flow, next);
DEBUG("Flow created %p", (void *)flow);
}
- priv_unlock(priv);
return flow;
}
@@ -969,17 +963,14 @@ mlx4_flow_isolate(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
if (priv->rxqs) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "isolated mode must be set"
" before configuring the device");
- priv_unlock(priv);
return -rte_errno;
}
priv->isolated = !!enable;
- priv_unlock(priv);
return 0;
}
@@ -1017,9 +1008,7 @@ mlx4_flow_destroy(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
(void)error;
- priv_lock(priv);
priv_flow_destroy(priv, flow);
- priv_unlock(priv);
return 0;
}
@@ -1053,9 +1042,7 @@ mlx4_flow_flush(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
(void)error;
- priv_lock(priv);
priv_flow_flush(priv);
- priv_unlock(priv);
return 0;
}