From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt
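
Update i40e_virtchnl_pf.c. In summary, the diff below:

- folds i40e_vc_disable_vf()/i40e_vc_reset_vf_msg() into a single
  i40e_vc_reset_vf(vf, notify_vf) helper
- factors promiscuous handling into i40e_set_vsi_promisc() /
  i40e_config_vf_promiscuous_mode() and clears promiscuous modes on VF reset
- serializes VF resets via I40E_VF_STATE_RESETTING and i40e_sync_vfr_reset(),
  and guards the ndo VF callbacks with __I40E_VIRTCHNL_OP_PENDING
- adds i40e_sync_vf_state() so virtchnl handlers wait out an in-progress reset
- tightens validation of virtchnl messages (queue bitmaps, IRQ map vectors,
  RSS LUT entries, MAC/VLAN filter limits) and drops the invalid-message
  counters
- adds i40e_get_vf_stats() and reports max_mtu to the VF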
---
kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 1240 ++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 817 insertions(+), 423 deletions(-)
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a39a8fe..bb2a79b 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -130,17 +130,18 @@
/***********************misc routines*****************************/
/**
- * i40e_vc_disable_vf
+ * i40e_vc_reset_vf
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset.
+ * @notify_vf: notify vf about reset or not
+ * Reset VF handler.
**/
-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
- i40e_vc_notify_vf_reset(vf);
+ if (notify_vf)
+ i40e_vc_notify_vf_reset(vf);
/* We want to ensure that an actual reset occurs initiated after this
* function was called. However, we do not want to wait forever, so
@@ -158,9 +159,14 @@
usleep_range(10000, 20000);
}
- dev_warn(&vf->pf->pdev->dev,
- "Failed to initiate reset for VF %d after 200 milliseconds\n",
- vf->vf_id);
+ if (notify_vf)
+ dev_warn(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
+ else
+ dev_dbg(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
}
/**
@@ -446,7 +452,7 @@
struct virtchnl_iwarp_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
- u32 msix_vf, size;
+ u32 msix_vf;
int ret = 0;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -460,11 +466,10 @@
goto err_out;
}
- size = sizeof(struct virtchnl_iwarp_qvlist_info) +
- (sizeof(struct virtchnl_iwarp_qv_info) *
- (qvlist_info->num_vectors - 1));
kfree(vf->qvlist_info);
- vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+ vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1),
+ GFP_KERNEL);
if (!vf->qvlist_info) {
ret = -ENOMEM;
goto err_out;
@@ -476,13 +481,14 @@
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
- v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
ret = -EINVAL;
goto err_free;
}
+
+ v_idx = qv_info->v_idx;
vf->qvlist_info->qv_info[i] = *qv_info;
@@ -958,7 +964,6 @@
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
- vf->num_mac = 0;
}
/* do the accounting and remove additional ADq VSI's */
@@ -1111,6 +1116,240 @@
}
/**
+ * __i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ u16 num_vlans = 0, bkt;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ int num_vlans;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return num_vlans;
+}
+
+/**
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the VSI
+ * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
+ * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
+ * This array is allocated here, but has to be freed in caller.
+ *
+ * Called to get number of VLANs and VLAN list present in mac_filter_hash.
+ **/
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
+ s16 **vlan_list)
+{
+ struct i40e_mac_filter *f;
+ int i = 0;
+ int bkt;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
+ if (!(*vlan_list))
+ goto err;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ (*vlan_list)[i++] = f->vlan;
+ }
+err:
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the VF struct
+ * @seid: VSI number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ * for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ * for a given VLAN
+ * @vl: List of VLANs - apply filter for given VLANs
+ * @num_vlans: Number of elements in @vl
+ **/
+static i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ bool unicast_enable, s16 *vl, u16 num_vlans)
+{
+ i40e_status aq_ret, aq_tmp = 0;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* No VLAN to set promisc on, set on VSI */
+ if (!num_vlans || !vl) {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+ multi_enable,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ return aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+ unicast_enable,
+ NULL, true);
+
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+ return aq_ret;
+ }
+
+ for (i = 0; i < num_vlans; i++) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+ multi_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+ unicast_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
+ }
+ }
+
+ if (aq_tmp)
+ aq_ret = aq_tmp;
+
+ return aq_ret;
+}
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
+{
+ i40e_status aq_ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+ u16 num_vlans;
+ s16 *vl;
+
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+ return I40E_ERR_PARAM;
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+ alluni, &vf->port_vlan_id, 1);
+ return aq_ret;
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
+
+ if (!vl)
+ return I40E_ERR_NO_MEMORY;
+
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ vl, num_vlans);
+ kfree(vl);
+ return aq_ret;
+ }
+
+ /* no VLANs to set on, set on VSI */
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ NULL, 0);
+ return aq_ret;
+}
+
+/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before trigger hardware reset, we need to know if no other process has
+ * reserved the hardware for any reset operations. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1124,9 +1363,11 @@
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1140,7 +1381,19 @@
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync VFR reset before trigger next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* waiting for finish reset by virtual driver */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset VF using VPGEN_VFRTRIG reg. It is also setting
+ * in progress state in rstat1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -1170,6 +1423,9 @@
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg;
+
+ /* disable promisc modes in case they were enabled */
+ i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
/* free VF resources to begin resetting the VSI state */
i40e_free_vf_res(vf);
@@ -1227,10 +1483,12 @@
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1267,7 +1525,8 @@
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ usleep_range(20000, 40000);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@@ -1300,8 +1559,12 @@
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1317,9 +1580,11 @@
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1347,6 +1612,10 @@
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1358,6 +1627,10 @@
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1367,10 +1640,16 @@
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
+ usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
return true;
@@ -1575,13 +1854,20 @@
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
- return i40e_pci_sriov_enable(pdev, num_vfs);
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
}
if (!pci_vfs_assigned(pf->pdev)) {
@@ -1590,9 +1876,12 @@
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto sriov_configure_out;
}
- return 0;
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
}
/***********************virtual channel routines******************/
@@ -1623,25 +1912,6 @@
hw = &pf->hw;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_invalid_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
- if (vf->num_invalid_msgs >
- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
- "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_invalid_msgs = 0;
- }
-
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
@@ -1667,6 +1937,32 @@
i40e_status retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+ int i;
+
+ /* When handling some messages, it needs VF state to be set.
+ * It is possible that this flag is cleared during VF reset,
+ * so there is a need to wait until the end of the reset to
+ * handle the request message correctly.
+ */
+ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+ if (test_bit(state, &vf->vf_states))
+ return true;
+ usleep_range(10000, 20000);
+ }
+
+ return test_bit(state, &vf->vf_states);
}
/**
@@ -1713,6 +2009,25 @@
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1726,17 +2041,15 @@
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
- int len = 0;
+ size_t len = 0;
int ret;
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
- len = (sizeof(struct virtchnl_vf_resource) +
- sizeof(struct virtchnl_vsi_resource) * num_vsis);
-
+ len = struct_size(vfres, vsi_res, num_vsis);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
aq_ret = I40E_ERR_NO_MEMORY;
@@ -1814,6 +2127,7 @@
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
@@ -1837,179 +2151,84 @@
}
/**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
-{
- if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- i40e_reset_vf(vf, false);
-}
-
-/**
- * i40e_getnum_vf_vsi_vlan_filters
- * @vsi: pointer to the vsi
- *
- * called to get the number of VLANs offloaded on this VF
- **/
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
-{
- struct i40e_mac_filter *f;
- int num_vlans = 0, bkt;
-
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
- num_vlans++;
- }
-
- return num_vlans;
-}
-
-/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the promiscuous mode of
* VF vsis
**/
-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
- u8 *msg, u16 msglen)
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_mac_filter *f;
i40e_status aq_ret = 0;
bool allmulti = false;
- struct i40e_vsi *vsi;
bool alluni = false;
- int aq_err = 0;
- int bkt;
- vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- !vsi) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
- goto error_param;
+ goto err_out;
}
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
dev_err(&pf->pdev->dev,
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
- /* Lie to the VF on purpose. */
+
+ /* Lie to the VF on purpose, because this is an error we can
+ * ignore. Unprivileged VF is not a virtual channel error.
+ */
aq_ret = 0;
- goto error_param;
+ goto err_out;
}
+
+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
/* Multicast promiscuous handling*/
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
- allmulti,
- vf->port_vlan_id,
- NULL);
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
- vsi->seid,
- allmulti,
- f->vlan,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- break;
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- goto error_param;
- }
- }
-
- if (!aq_ret) {
- dev_info(&pf->pdev->dev,
- "VF %d successfully set multicast promiscuous mode\n",
- vf->vf_id);
- if (allmulti)
- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- else
- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
-
if (info->flags & FLAG_VF_UNICAST_PROMISC)
alluni = true;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
- alluni,
- vf->port_vlan_id,
- NULL);
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
- vsi->seid,
- alluni,
- f->vlan,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret)
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
- alluni, NULL,
- true);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
- vf->vf_id, info->flags,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- goto error_param;
- }
- }
+ aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+ alluni);
+ if (aq_ret)
+ goto err_out;
- if (!aq_ret) {
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
- "VF %d successfully set unicast promiscuous mode\n",
+ "VF %d successfully unset multicast promiscuous mode\n",
vf->vf_id);
- if (alluni)
- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- else
- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- }
-error_param:
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
+err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -2020,37 +2239,58 @@
* i40e_vc_config_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the rx/tx
* queues
**/
-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id = 0;
+ struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
+ struct i40e_vsi *vsi;
+ u16 num_qps_all = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vf->adq_enabled) {
+ for (i = 0; i < vf->num_tc; i++)
+ num_qps_all += vf->ch[i].num_qps;
+ if (num_qps_all != qci->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
vsi_id = qci->vsi_id;
-
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
-
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (!vf->adq_enabled) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ qpi->txq.queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
vsi_queue_id = qpi->txq.queue_id;
if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2061,9 +2301,12 @@
}
}
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
+ if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
+ vsi_id = vf->ch[idx].vsi_id;
}
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
@@ -2078,8 +2321,12 @@
* VF does not know about these additional VSIs and all
* it cares is about its own queues. PF configures these queues
* to its appropriate VSIs based on TC mapping
- **/
+ */
if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
if (j == (vf->ch[idx].num_qps - 1)) {
idx++;
j = 0; /* resetting the queue count */
@@ -2088,7 +2335,6 @@
j++;
vsi_queue_id++;
}
- vsi_id = vf->ch[idx].vsi_id;
}
}
/* set vsi num_queue_pairs in use to num configured by VF */
@@ -2096,9 +2342,15 @@
pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
qci->num_queue_pairs;
} else {
- for (i = 0; i < vf->num_tc; i++)
- pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
- vf->ch[i].num_qps;
+ for (i = 0; i < vf->num_tc; i++) {
+ vsi = pf->vsi[vf->ch[i].vsi_idx];
+ vsi->num_queue_pairs = vf->ch[i].num_qps;
+
+ if (i40e_update_adq_vsi_queues(vsi, i)) {
+ aq_ret = I40E_ERR_CONFIG;
+ goto error_param;
+ }
+ }
}
error_param:
@@ -2108,7 +2360,8 @@
}
/**
- * i40e_validate_queue_map
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
* @vsi_id: vsi id
* @queuemap: Tx or Rx queue map
*
@@ -2138,35 +2391,39 @@
* i40e_vc_config_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the irq to
* queue map
**/
-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vector_id;
+ u16 vsi_id;
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (irqmap_info->num_vectors >
+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
/* validate msg params */
- if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ vsi_id = map->vsi_id;
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
aq_ret = I40E_ERR_PARAM;
@@ -2232,19 +2489,33 @@
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to enable all or specific queue(s)
**/
-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2253,12 +2524,12 @@
goto error_param;
}
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2294,19 +2565,18 @@
* i40e_vc_disable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to disable all or specific
* queue(s)
**/
-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2316,7 +2586,7 @@
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2395,29 +2665,24 @@
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
* available queues and return result of sending VF a message.
**/
-static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
- int req_pairs = vfres->num_queue_pairs;
- int cur_pairs = vf->num_queue_pairs;
+ u16 req_pairs = vfres->num_queue_pairs;
+ u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
return -EINVAL;
- if (req_pairs <= 0) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
- vf->vf_id, req_pairs);
- } else if (req_pairs > I40E_MAX_VF_QUEUES) {
+ if (req_pairs > I40E_MAX_VF_QUEUES) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id,
@@ -2439,8 +2704,7 @@
} else {
/* successful request */
vf->num_req_queues = req_pairs;
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return 0;
}
@@ -2452,11 +2716,10 @@
* i40e_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to get vsi stats
**/
-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
@@ -2467,7 +2730,7 @@
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2495,7 +2758,7 @@
* MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
*/
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
-#define I40E_VC_MAX_VLAN_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 16
/**
* i40e_check_vf_permission
@@ -2518,20 +2781,12 @@
struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ int mac2add_cnt = 0;
int i;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
-
for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
if (is_broadcast_ether_addr(addr) ||
@@ -2552,11 +2807,27 @@
!is_multicast_ether_addr(addr) && vf->pf_set_mac &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
+
+ /*count filters that really will be added*/
+ f = i40e_find_mac(vsi, addr);
+ if (!f)
+ ++mac2add_cnt;
}
+ /* If this VF is not privileged, then we can't add more than a limited
+ * number of addresses. Check to make sure that the additions do not
+ * push us over the limit.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
return 0;
}
@@ -2564,22 +2835,20 @@
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* add guest mac address filter
**/
-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = al->vsi_id;
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2612,9 +2881,11 @@
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac++;
}
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2627,30 +2898,29 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret);
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
}
/**
* i40e_vc_del_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* remove guest mac address filter
**/
-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = al->vsi_id;
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2663,16 +2933,8 @@
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
-
- if (vf->pf_set_mac &&
- ether_addr_equal(al->list[i].addr,
- vf->default_lan_addr.addr)) {
- dev_err(&pf->pdev->dev,
- "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
- vf->default_lan_addr.addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2683,8 +2945,6 @@
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2695,27 +2955,40 @@
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
* i40e_vc_add_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* program guest vlan id
**/
-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vfl->vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2726,7 +2999,7 @@
goto error_param;
}
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2778,22 +3051,20 @@
* i40e_vc_remove_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* remove programmed guest vlan id
**/
-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vfl->vsi_id;
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2807,7 +3078,8 @@
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
- aq_ret = I40E_ERR_PARAM;
+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
+ aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2865,13 +3137,11 @@
* i40e_vc_iwarp_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
* @config: config qvmap or release it
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
- bool config)
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
struct virtchnl_iwarp_qvlist_info *qvlist_info =
(struct virtchnl_iwarp_qvlist_info *)msg;
@@ -2902,22 +3172,20 @@
* i40e_vc_config_rss_key
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Configure the VF's RSS key
**/
-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vrk->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2934,25 +3202,30 @@
* i40e_vc_config_rss_lut
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Configure the VF's RSS LUT
**/
-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vrl->vsi_id;
i40e_status aq_ret = 0;
+ u16 i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
+ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+
+ for (i = 0; i < vrl->lut_entries; i++)
+ if (vrl->lut[i] >= vf->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
vsi = pf->vsi[vf->lan_vsi_idx];
aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
@@ -2966,18 +3239,17 @@
* i40e_vc_get_rss_hena
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Return the RSS HENA bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3002,11 +3274,10 @@
* i40e_vc_set_rss_hena
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Set the RSS HENA bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh =
(struct virtchnl_rss_hena *)msg;
@@ -3014,7 +3285,7 @@
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3031,21 +3302,20 @@
* i40e_vc_enable_vlan_stripping
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Enable vlan header stripping for the VF
**/
-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
- u16 msglen)
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_vlan_stripping_enable(vsi);
/* send the response to the VF */
@@ -3058,21 +3328,20 @@
* i40e_vc_disable_vlan_stripping
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Disable vlan header stripping for the VF
**/
-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
- u16 msglen)
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_vlan_stripping_disable(vsi);
/* send the response to the VF */
@@ -3083,8 +3352,8 @@
/**
* i40e_validate_cloud_filter
- * @mask: mask for TC filter
- * @data: data for TC filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
*
* This function validates cloud filter programmed as TC filter for ADq
**/
@@ -3179,7 +3448,7 @@
}
if (mask.dst_port & data.dst_port) {
- if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
+ if (!data.dst_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
vf->vf_id);
goto err;
@@ -3187,7 +3456,7 @@
}
if (mask.src_port & data.src_port) {
- if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
+ if (!data.src_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
vf->vf_id);
goto err;
@@ -3217,7 +3486,7 @@
/**
* i40e_find_vsi_from_seid - searches for the vsi with the given seid
* @vf: pointer to the VF info
- * @seid - seid of the vsi it is searching for
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
@@ -3294,7 +3563,7 @@
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3425,7 +3694,7 @@
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -3530,10 +3799,11 @@
(struct virtchnl_tc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int i, adq_request_qps = 0, speed = 0;
+ int i, adq_request_qps = 0;
i40e_status aq_ret = 0;
+ u64 speed = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3557,8 +3827,8 @@
/* max number of traffic classes for VF currently capped at 4 */
if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
dev_err(&pf->pdev->dev,
- "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
- vf->vf_id, tci->num_tc);
+ "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
+ vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3568,8 +3838,9 @@
if (!tci->list[i].count ||
tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
dev_err(&pf->pdev->dev,
- "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
- vf->vf_id, i, tci->list[i].count);
+ "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
+ vf->vf_id, i, tci->list[i].count,
+ I40E_DEFAULT_QUEUES_PER_VF);
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3641,8 +3912,7 @@
vf->adq_enabled = true;
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3662,7 +3932,7 @@
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3682,8 +3952,7 @@
}
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3713,7 +3982,7 @@
int ret;
pf->vf_aq_requests++;
- if (local_vf_id >= pf->num_alloc_vfs)
+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
@@ -3724,25 +3993,12 @@
/* perform basic checks on the msg */
ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
- /* perform additional checks specific to this driver */
- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
- if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
- ret = -EINVAL;
- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
- if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
- ret = -EINVAL;
- }
-
if (ret) {
i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
switch (ret) {
- case VIRTCHNL_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_PARAM:
return -EPERM;
default:
return -EINVAL;
@@ -3758,69 +4014,69 @@
i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf(vf, false);
ret = 0;
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_config_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+ ret = i40e_vc_config_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_enable_queues_msg(vf, msg);
i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_disable_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+ ret = i40e_vc_add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+ ret = i40e_vc_del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+ ret = i40e_vc_add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+ ret = i40e_vc_remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+ ret = i40e_vc_get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ ret = i40e_vc_config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ ret = i40e_vc_config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ ret = i40e_vc_get_rss_hena(vf, msg);
break;
case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ ret = i40e_vc_set_rss_hena(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
+ ret = i40e_vc_enable_vlan_stripping(vf, msg);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
+ ret = i40e_vc_disable_vlan_stripping(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_request_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
ret = i40e_vc_add_qch_msg(vf, msg);
@@ -3889,6 +4145,35 @@
}
/**
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
+ *
+ * Check that the VF is enabled and the VSI exists.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ vf = &pf->vf[vf_id];
+ vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+ if (!vsi)
+ ret = -EINVAL;
+err_out:
+ return ret;
+}
+
+/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3908,20 +4193,23 @@
int bkt;
u8 i;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev,
- "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
- goto error_param;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
}
- vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_idx];
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_param;
+
+ vf = &pf->vf[vf_id];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
@@ -3934,6 +4222,7 @@
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
@@ -3976,11 +4265,14 @@
mac, vf_id);
}
- /* Force the VF driver stop so it has to reload with new MAC address */
- i40e_vc_disable_vf(vf);
- dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ /* Force the VF interface down so it has to bring up with new MAC
+ * address
+ */
+ i40e_vc_reset_vf(vf, true);
+ dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -3999,17 +4291,21 @@
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool allmulti = false, alluni = false;
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
- goto error_pvid;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
}
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_pvid;
if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
@@ -4023,7 +4319,7 @@
goto error_pvid;
}
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4036,7 +4332,7 @@
/* duplicate request, so just return success */
goto error_pvid;
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
/* Locked once because multiple functions below iterate list */
@@ -4070,6 +4366,15 @@
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* disable promisc modes in case they were enabled */
+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+ allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -4096,6 +4401,12 @@
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ alluni = true;
+
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ allmulti = true;
+
/* Schedule the worker thread to take care of applying changes */
i40e_service_event_schedule(vsi->back);
@@ -4108,9 +4419,17 @@
* default LAN MAC address.
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+ goto error_pvid;
+ }
+
ret = 0;
error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4132,20 +4451,24 @@
struct i40e_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
- ret = -EINVAL;
- goto error;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
}
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error;
if (min_tx_rate) {
dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
min_tx_rate, vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4160,6 +4483,7 @@
vf->tx_rate = max_tx_rate;
error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4180,20 +4504,21 @@
struct i40e_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
- goto error_param;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
}
- vf = &(pf->vf[vf_id]);
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_param;
+
+ vf = &pf->vf[vf_id];
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
- ret = -EAGAIN;
+ if (!vsi) {
+ ret = -ENOENT;
goto error_param;
}
@@ -4217,6 +4542,7 @@
ret = 0;
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4237,6 +4563,11 @@
struct i40e_vf *vf;
int abs_vf_id;
int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -4281,6 +4612,7 @@
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4301,6 +4633,11 @@
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -4335,6 +4672,7 @@
ret = -EIO;
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4353,15 +4691,22 @@
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vf = &pf->vf[vf_id];
@@ -4370,7 +4715,7 @@
goto out;
vf->trusted = setting;
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
@@ -4384,5 +4729,54 @@
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
--
Gitblit v1.6.2