-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h          |   7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c  |  92
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_idc.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c      | 274
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c     |  46
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h     |   6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c |  13
7 files changed, 256 insertions(+), 184 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 8cfc68cbfa06..1bf7934d4e28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -284,8 +284,7 @@ struct idpf_port_stats {
struct idpf_fsteer_fltr {
struct list_head list;
- u32 loc;
- u32 q_index;
+ struct ethtool_rx_flow_spec fs;
};
/**
@@ -424,14 +423,12 @@ enum idpf_user_flags {
* @rss_key: RSS hash key
* @rss_lut_size: Size of RSS lookup table
* @rss_lut: RSS lookup table
- * @cached_lut: Used to restore previously init RSS lut
*/
struct idpf_rss_data {
u16 rss_key_size;
u8 *rss_key;
u16 rss_lut_size;
u32 *rss_lut;
- u32 *cached_lut;
};
/**
@@ -558,6 +555,7 @@ struct idpf_vector_lifo {
* @max_q: Maximum possible queues
* @req_qs_chunks: Queue chunk data for requested queues
* @mac_filter_list_lock: Lock to protect mac filters
+ * @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
*/
struct idpf_vport_config {
@@ -565,6 +563,7 @@ struct idpf_vport_config {
struct idpf_vport_max_q max_q;
struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
+ spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2589e124e41c..2efa3c08aba5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -37,6 +37,7 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
+ struct idpf_vport_config *vport_config;
struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
unsigned int cnt = 0;
@@ -44,7 +45,8 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXCLSRLCNT:
@@ -52,26 +54,34 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = idpf_fsteer_max_rules(vport);
break;
case ETHTOOL_GRXCLSRULE:
- err = -EINVAL;
+ err = -ENOENT;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list)
- if (f->loc == cmd->fs.location) {
- cmd->fs.ring_cookie = f->q_index;
+ if (f->fs.location == cmd->fs.location) {
+ /* Avoid infoleak from padding: zero first,
+ * then assign fields
+ */
+ memset(&cmd->fs, 0, sizeof(cmd->fs));
+ cmd->fs = f->fs;
err = 0;
break;
}
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
case ETHTOOL_GRXCLSRLALL:
cmd->data = idpf_fsteer_max_rules(vport);
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list) {
if (cnt == cmd->rule_cnt) {
err = -EMSGSIZE;
break;
}
- rule_locs[cnt] = f->loc;
+ rule_locs[cnt] = f->fs.location;
cnt++;
}
if (!err)
cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
default:
break;
@@ -168,7 +178,7 @@ static int idpf_add_flow_steer(struct net_device *netdev,
struct idpf_vport *vport;
u32 flow_type, q_index;
u16 num_rxq;
- int err;
+ int err = 0;
vport = idpf_netdev_to_vport(netdev);
vport_config = vport->adapter->vport_config[np->vport_idx];
@@ -194,6 +204,29 @@ static int idpf_add_flow_steer(struct net_device *netdev,
if (!rule)
return -ENOMEM;
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out_free_rule;
+ }
+
+ /* detect duplicate entry and reject before adding rules */
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->fs.location == fsp->location) {
+ err = -EEXIST;
+ break;
+ }
+
+ if (f->fs.location > fsp->location)
+ break;
+ parent = f;
+ }
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+
+ if (err)
+ goto out;
+
rule->vport_id = cpu_to_le32(vport->vport_id);
rule->count = cpu_to_le32(1);
info = &rule->rule_info[0];
@@ -232,26 +265,20 @@ static int idpf_add_flow_steer(struct net_device *netdev,
goto out;
}
- fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
- if (!fltr) {
- err = -ENOMEM;
- goto out;
- }
-
- fltr->loc = fsp->location;
- fltr->q_index = q_index;
- list_for_each_entry(f, &user_config->flow_steer_list, list) {
- if (f->loc >= fltr->loc)
- break;
- parent = f;
- }
+ /* Save a copy of the user's flow spec so ethtool can later retrieve it */
+ fltr->fs = *fsp;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
parent ? list_add(&fltr->list, &parent->list) :
list_add(&fltr->list, &user_config->flow_steer_list);
user_config->num_fsteer_fltrs++;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+ goto out_free_rule;
out:
+ kfree(fltr);
+out_free_rule:
kfree(rule);
return err;
}
@@ -302,17 +329,20 @@ static int idpf_del_flow_steer(struct net_device *netdev,
goto out;
}
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry_safe(f, iter,
&user_config->flow_steer_list, list) {
- if (f->loc == fsp->location) {
+ if (f->fs.location == fsp->location) {
list_del(&f->list);
kfree(f);
user_config->num_fsteer_fltrs--;
- goto out;
+ goto out_unlock;
}
}
- err = -EINVAL;
+ err = -ENOENT;
+out_unlock:
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
out:
kfree(rule);
return err;
@@ -381,7 +411,10 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
* @netdev: network interface device structure
* @rxfh: pointer to param struct (indir, key, hfunc)
*
- * Reads the indirection table directly from the hardware. Always returns 0.
+ * RSS LUT and Key information are read from driver's cached
+ * copy. When rxhash is off, rss lut will be displayed as zeros.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int idpf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
@@ -389,10 +422,13 @@ static int idpf_get_rxfh(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ bool rxhash_ena;
int err = 0;
u16 i;
idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
adapter = np->adapter;
@@ -402,9 +438,8 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->key)
@@ -412,7 +447,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- rxfh->indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rxhash_ena ? rss_data->rss_lut[i] : 0;
}
unlock_mutex:
@@ -452,8 +487,6 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -469,7 +502,8 @@ static int idpf_set_rxfh(struct net_device *netdev,
rss_data->rss_lut[lut] = rxfh->indir[lut];
}
- err = idpf_config_rss(vport);
+ if (test_bit(IDPF_VPORT_UP, np->state))
+ err = idpf_config_rss(vport);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
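The ethtool changes above come down to storing the whole struct ethtool_rx_flow_spec per filter and serializing list access with the new flow_steer_list_lock. A minimal sketch of the resulting ETHTOOL_GRXCLSRULE lookup, using the driver structures from this patch (example_get_cls_rule is a hypothetical helper name, not part of the patch):

static int example_get_cls_rule(struct idpf_vport_config *vport_config,
				struct ethtool_rxnfc *cmd)
{
	struct idpf_vport_user_config_data *user_config = &vport_config->user_config;
	struct idpf_fsteer_fltr *f;
	int err = -ENOENT;

	spin_lock_bh(&vport_config->flow_steer_list_lock);
	list_for_each_entry(f, &user_config->flow_steer_list, list) {
		if (f->fs.location != cmd->fs.location)
			continue;
		/* Return the full saved flow spec instead of rebuilding it
		 * from loc/q_index as the old code did.
		 */
		cmd->fs = f->fs;
		err = 0;
		break;
	}
	spin_unlock_bh(&vport_config->flow_steer_list_lock);

	return err;
}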
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
index 7e20a07e98e5..6dad0593f7f2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_idc.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -322,7 +322,7 @@ static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
for (i = 0; i < adapter->num_alloc_vports; i++) {
struct idpf_vport *vport = adapter->vports[i];
- if (!vport)
+ if (!vport || !vport->vdev_info)
continue;
idpf_unplug_aux_dev(vport->vdev_info->adev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 7ce4eb71a433..131a8121839b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -443,6 +443,29 @@ send_dealloc_vecs:
}
/**
+ * idpf_del_all_flow_steer_filters - Delete all flow steer filters in list
+ * @vport: main vport struct
+ *
+ * Takes flow_steer_list_lock spinlock. Deletes all filters
+ */
+static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_fsteer_fltr *f, *ftmp;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
+ list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ vport_config->user_config.num_fsteer_fltrs = 0;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+}
+
+/**
* idpf_find_mac_filter - Search filter list for specific mac filter
* @vconfig: Vport config structure
* @macaddr: The MAC address
@@ -729,6 +752,65 @@ static int idpf_init_mac_addr(struct idpf_vport *vport,
return 0;
}
+static void idpf_detach_and_close(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct net_device *netdev = adapter->netdevs[i];
+
+ /* If the interface is in detached state, that means the
+ * previous reset was not handled successfully for this
+ * vport.
+ */
+ if (!netif_device_present(netdev))
+ continue;
+
+ /* Hold RTNL to protect racing with callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ set_bit(IDPF_VPORT_UP_REQUESTED,
+ adapter->vport_config[i]->flags);
+ dev_close(netdev);
+ }
+ rtnl_unlock();
+ }
+}
+
+static void idpf_attach_and_open(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+ struct idpf_vport_config *vport_config;
+ struct net_device *netdev;
+
+ /* In case of a critical error in the init task, the vport
+ * will be freed. Only continue to restore the netdevs
+ * if the vport is allocated.
+ */
+ if (!vport)
+ continue;
+
+ /* No need for RTNL on attach as this function is called
+ * following detach and dev_close(). We do take RTNL for
+ * dev_open() below as it can race with external callbacks
+ * following the call to netif_device_attach().
+ */
+ netdev = adapter->netdevs[i];
+ netif_device_attach(netdev);
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
+ vport_config->flags)) {
+ rtnl_lock();
+ dev_open(netdev, NULL);
+ rtnl_unlock();
+ }
+ }
+}
+
/**
* idpf_cfg_netdev - Allocate, configure and register a netdev
* @vport: main vport structure
@@ -991,7 +1073,7 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss(vport);
+ idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
@@ -1023,6 +1105,8 @@ static void idpf_vport_rel(struct idpf_vport *vport)
kfree(adapter->vport_config[idx]->req_qs_chunks);
adapter->vport_config[idx]->req_qs_chunks = NULL;
}
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1041,12 +1125,15 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport, true);
- if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
+ idpf_vport_stop(vport, true);
idpf_decfg_netdev(vport);
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ }
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
idpf_del_all_mac_filters(vport);
+ idpf_del_all_flow_steer_filters(vport);
+ }
if (adapter->netdevs[i]) {
struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
@@ -1139,6 +1226,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
u16 idx = adapter->next_vport;
struct idpf_vport *vport;
u16 num_max_q;
+ int err;
if (idx == IDPF_NO_FREE_SLOT)
return NULL;
@@ -1189,10 +1277,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
idpf_vport_init(vport, max_q);
- /* This alloc is done separate from the LUT because it's not strictly
- * dependent on how many queues we have. If we change number of queues
- * and soft reset we'll need a new LUT but the key can remain the same
- * for as long as the vport exists.
+ /* LUT and key are both initialized here. Key is not strictly dependent
+ * on how many queues we have. If we change number of queues and soft
+ * reset is initiated, LUT will be freed and a new LUT will be allocated
+ * as per the updated number of queues during vport bringup. However,
+ * the key remains the same for as long as the vport exists.
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
@@ -1202,6 +1291,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+ /* Initialize default rss LUT */
+ err = idpf_init_rss_lut(vport);
+ if (err)
+ goto free_rss_key;
+
/* fill vport slot in the adapter struct */
adapter->vports[idx] = vport;
adapter->vport_ids[idx] = idpf_get_vport_id(vport);
@@ -1212,6 +1306,8 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return vport;
+free_rss_key:
+ kfree(rss_data->rss_key);
free_vector_idxs:
kfree(vport->q_vector_idxs);
free_vport:
@@ -1388,7 +1484,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_vport_config *vport_config;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1429,14 +1524,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
err = idpf_rx_bufs_init_all(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
idpf_rx_init_buf_tail(vport);
@@ -1482,13 +1577,9 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- vport_config = adapter->vport_config[vport->idx];
- if (vport_config->user_config.rss_data.rss_lut)
- err = idpf_config_rss(vport);
- else
- err = idpf_init_rss(vport);
+ err = idpf_config_rss(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
@@ -1497,7 +1588,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
vport->vport_id, err);
- goto deinit_rss;
+ goto disable_vport;
}
if (rtnl)
@@ -1505,8 +1596,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
-deinit_rss:
- idpf_deinit_rss(vport);
disable_vport:
idpf_send_disable_vport_msg(vport);
disable_queues:
@@ -1544,7 +1633,6 @@ void idpf_init_task(struct work_struct *work)
struct idpf_vport_config *vport_config;
struct idpf_vport_max_q max_q;
struct idpf_adapter *adapter;
- struct idpf_netdev_priv *np;
struct idpf_vport *vport;
u16 num_default_vports;
struct pci_dev *pdev;
@@ -1579,10 +1667,15 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
+ err = idpf_send_get_rx_ptype_msg(vport);
+ if (err)
+ goto unwind_vports;
+
index = vport->idx;
vport_config = adapter->vport_config[index];
spin_lock_init(&vport_config->mac_filter_list_lock);
+ spin_lock_init(&vport_config->flow_steer_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
@@ -1590,21 +1683,11 @@ void idpf_init_task(struct work_struct *work)
err = idpf_check_supported_desc_ids(vport);
if (err) {
dev_err(&pdev->dev, "failed to get required descriptor ids\n");
- goto cfg_netdev_err;
+ goto unwind_vports;
}
if (idpf_cfg_netdev(vport))
- goto cfg_netdev_err;
-
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto handle_err;
-
- /* Once state is put into DOWN, driver is ready for dev_open */
- np = netdev_priv(vport->netdev);
- clear_bit(IDPF_VPORT_UP, np->state);
- if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport, true);
+ goto unwind_vports;
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1635,21 +1718,15 @@ void idpf_init_task(struct work_struct *work)
set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
- /* As all the required vports are created, clear the reset flag
- * unconditionally here in case we were in reset and the link was down.
- */
+ /* Clear the reset and load bits as all vports are created */
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
/* Start the statistics task now */
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
return;
-handle_err:
- idpf_decfg_netdev(vport);
-cfg_netdev_err:
- idpf_vport_rel(vport);
- adapter->vports[index] = NULL;
unwind_vports:
if (default_vport) {
for (index = 0; index < adapter->max_vports; index++) {
@@ -1657,6 +1734,15 @@ unwind_vports:
idpf_vport_dealloc(adapter->vports[index]);
}
}
+ /* Cleanup after vc_core_init, which has no way of knowing the
+ * init task failed on driver load.
+ */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+ }
+ idpf_ptp_release(adapter);
+
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}
@@ -1787,27 +1873,6 @@ static int idpf_check_reset_complete(struct idpf_hw *hw,
}
/**
- * idpf_set_vport_state - Set the vport state to be after the reset
- * @adapter: Driver specific private structure
- */
-static void idpf_set_vport_state(struct idpf_adapter *adapter)
-{
- u16 i;
-
- for (i = 0; i < adapter->max_vports; i++) {
- struct idpf_netdev_priv *np;
-
- if (!adapter->netdevs[i])
- continue;
-
- np = netdev_priv(adapter->netdevs[i]);
- if (test_bit(IDPF_VPORT_UP, np->state))
- set_bit(IDPF_VPORT_UP_REQUESTED,
- adapter->vport_config[i]->flags);
- }
-}
-
-/**
* idpf_init_hard_reset - Initiate a hardware reset
* @adapter: Driver specific private structure
*
@@ -1815,37 +1880,25 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
* reallocate. Also reinitialize the mailbox. Return 0 on success,
* negative on failure.
*/
-static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+static void idpf_init_hard_reset(struct idpf_adapter *adapter)
{
struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev;
int err;
- u16 i;
+ idpf_detach_and_close(adapter);
mutex_lock(&adapter->vport_ctrl_lock);
dev_info(dev, "Device HW Reset initiated\n");
- /* Avoid TX hangs on reset */
- for (i = 0; i < adapter->max_vports; i++) {
- netdev = adapter->netdevs[i];
- if (!netdev)
- continue;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- }
-
/* Prepare for reset */
- if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
idpf_idc_issue_reset_event(adapter->cdev_info);
- idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
@@ -1892,11 +1945,14 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
- /* Wait until all vports are created to init RDMA CORE AUX */
- if (!err)
- err = idpf_idc_init(adapter);
-
- return err;
+ /* Attempt to restore netdevs and initialize RDMA CORE AUX device,
+ * provided vc_core_init succeeded. It is still possible that
+ * vports are not allocated at this point if the init task failed.
+ */
+ if (!err) {
+ idpf_attach_and_open(adapter);
+ idpf_idc_init(adapter);
+ }
}
/**
@@ -1997,7 +2053,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_stop(vport, false);
}
- idpf_deinit_rss(vport);
/* We're passing in vport here because we need its wait_queue
* to send a message and it should be getting all the vport
* config data out of the adapter but we need to be careful not
@@ -2023,6 +2078,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
+ if (reset_cause == IDPF_SR_Q_CHANGE &&
+ !netif_is_rxfh_configured(vport->netdev))
+ idpf_fill_dflt_rss_lut(vport);
+
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2166,40 +2225,6 @@ static void idpf_set_rx_mode(struct net_device *netdev)
}
/**
- * idpf_vport_manage_rss_lut - disable/enable RSS
- * @vport: the vport being changed
- *
- * In the event of disable request for RSS, this function will zero out RSS
- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
- * LUT with the default LUT configuration.
- */
-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
-{
- bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
- struct idpf_rss_data *rss_data;
- u16 idx = vport->idx;
- int lut_size;
-
- rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
-
- if (ena) {
- /* This will contain the default or user configured LUT */
- memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
- } else {
- /* Save a copy of the current LUT to be restored later if
- * requested.
- */
- memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
-
- /* Zero out the current LUT to disable */
- memset(rss_data->rss_lut, 0, lut_size);
- }
-
- return idpf_config_rss(vport);
-}
-
-/**
* idpf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -2224,10 +2249,19 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_RXHASH) {
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
netdev->features ^= NETIF_F_RXHASH;
- err = idpf_vport_manage_rss_lut(vport);
- if (err)
- goto unlock_mutex;
+
+ /* If the interface is not up when changing the rxhash, update
+ * to the HW is skipped. The updated LUT will be committed to
+ * the HW when the interface is brought up.
+ */
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
+ err = idpf_config_rss(vport);
+ if (err)
+ goto unlock_mutex;
+ }
}
if (changed & NETIF_F_GRO_HW) {
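Taken together, the reset rework in idpf_lib.c replaces the old carrier-off/tx-disable loop with a detach-and-close before the hard reset and an attach-and-open after it, with IDPF_VPORT_UP_REQUESTED remembering which interfaces were running. A simplified sketch of that flow, condensed from the idpf_detach_and_close()/idpf_attach_and_open() helpers added above (example_hard_reset_netdevs is a hypothetical wrapper, and it holds RTNL for the whole block for brevity, whereas the patch only requires it around dev_close()/dev_open() and the detach):

static void example_hard_reset_netdevs(struct idpf_adapter *adapter, int i,
				       bool going_down)
{
	struct net_device *netdev = adapter->netdevs[i];

	rtnl_lock();
	if (going_down) {
		/* Before the reset: detach and, if running, close while
		 * recording that the vport should come back up.
		 */
		netif_device_detach(netdev);
		if (netif_running(netdev)) {
			set_bit(IDPF_VPORT_UP_REQUESTED,
				adapter->vport_config[i]->flags);
			dev_close(netdev);
		}
	} else {
		/* After the reset: reattach and reopen only the vports that
		 * were up before.
		 */
		netif_device_attach(netdev);
		if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
				       adapter->vport_config[i]->flags))
			dev_open(netdev, NULL);
	}
	rtnl_unlock();
}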
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1d91c56f7469..7f3933ca9edc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -695,9 +695,10 @@ err:
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{
struct libeth_fq fq = {
- .count = rxq->desc_count,
- .type = LIBETH_FQE_MTU,
- .nid = idpf_q_vector_to_mem(rxq->q_vector),
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
int ret;
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.truesize = bufq->truesize,
.count = bufq->desc_count,
.type = type,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
@@ -4641,7 +4643,7 @@ int idpf_config_rss(struct idpf_vport *vport)
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
*/
-static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
u16 num_active_rxq = vport->num_rxq;
@@ -4650,57 +4652,47 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- for (i = 0; i < rss_data->rss_lut_size; i++) {
+ for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
- rss_data->cached_lut[i] = rss_data->rss_lut[i];
- }
}
/**
- * idpf_init_rss - Allocate and initialize RSS resources
+ * idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_init_rss(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
- u32 lut_size;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ if (!rss_data->rss_lut) {
+ u32 lut_size;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
- rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->rss_lut)
- return -ENOMEM;
-
- rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->cached_lut) {
- kfree(rss_data->rss_lut);
- rss_data->rss_lut = NULL;
-
- return -ENOMEM;
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->rss_lut)
+ return -ENOMEM;
}
/* Fill the default RSS lut values */
idpf_fill_dflt_rss_lut(vport);
- return idpf_config_rss(vport);
+ return 0;
}
/**
- * idpf_deinit_rss - Release RSS resources
+ * idpf_deinit_rss_lut - Release RSS LUT
* @vport: virtual port
*/
-void idpf_deinit_rss(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- kfree(rss_data->cached_lut);
- rss_data->cached_lut = NULL;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
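The default LUT fill above spreads flows round-robin across the active RX queues; for instance, an 8-entry LUT with 3 active queues becomes { 0, 1, 2, 0, 1, 2, 0, 1 }. A tiny standalone illustration (example_fill_lut is a hypothetical name operating on a plain array rather than idpf_rss_data):

static void example_fill_lut(u32 *lut, u16 lut_size, u16 num_active_rxq)
{
	u16 i;

	/* Same round-robin spread as idpf_fill_dflt_rss_lut() */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_active_rxq;
}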
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 75b977094741..423cc9486dce 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -101,6 +101,7 @@ do { \
idx = 0; \
} while (0)
+#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
@@ -1085,9 +1086,10 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss(struct idpf_vport *vport);
-void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_init_rss_lut(struct idpf_vport *vport);
+void idpf_deinit_rss_lut(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 5bbe7d9294c1..cb702eac86c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2804,6 +2804,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
* @vport: virtual port data structure
* @get: flag to set or get rss look up table
*
+ * When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash
+ * is enabled, the LUT values stored in driver's soft copy will be used to setup
+ * the HW.
+ *
* Returns 0 on success, negative on failure.
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
@@ -2814,10 +2818,12 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
ssize_t reply_sz;
+ bool rxhash_ena;
int i;
rss_data =
&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
@@ -2839,7 +2845,8 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
} else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
- rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+ rl->lut[i] = rxhash_ena ?
+ cpu_to_le32(rss_data->rss_lut[i]) : 0;
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
@@ -3570,6 +3577,7 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ struct idpf_hw *hw = &adapter->hw;
bool remove_in_prog;
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
@@ -3593,6 +3601,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_vport_params_buf_rel(adapter);
+ kfree(hw->lan_regs);
+ hw->lan_regs = NULL;
+
kfree(adapter->vports);
adapter->vports = NULL;
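The virtchnl change keeps the driver's soft copy of the LUT intact and only zeroes what is sent to hardware while NETIF_F_RXHASH is off, so re-enabling rxhash can simply re-send the stored table without the old cached_lut. A condensed sketch of the set path, simplified from idpf_send_get_set_rss_lut_msg() above (only the lines relevant to the rxhash gate are shown):

	rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);

	rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
	for (i = 0; i < rss_data->rss_lut_size; i++)
		/* HW sees zeros when rxhash is off; rss_data->rss_lut keeps
		 * the user/default values for when it is turned back on.
		 */
		rl->lut[i] = rxhash_ena ?
			     cpu_to_le32(rss_data->rss_lut[i]) : 0;
	xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;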