2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -6,6 +6,11 @@
 #include <crypto/aead.h>
 #include <linux/if_bridge.h>
 
+#define IXGBE_IPSEC_KEY_BITS 160
+static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+
 /**
  * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
  * @hw: hw specific details
@@ -287,6 +292,13 @@
 /**
  * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
  * @adapter: board private structure
+ *
+ * Reload the HW tables from the SW tables after they've been bashed
+ * by a chip reset.
+ *
+ * Any VF entries are removed from the SW and HW tables since either
+ * (a) the VF also gets reset on PF reset and will ask again for the
+ * offloads, or (b) the VF has been removed by a change in the num_vfs.
  **/
 void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
 {
@@ -302,26 +314,34 @@
 	ixgbe_ipsec_clear_hw_tables(adapter);
 	ixgbe_ipsec_start_engine(adapter);
 
+	/* reload the Rx and Tx keys */
+	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+		struct rx_sa *r = &ipsec->rx_tbl[i];
+		struct tx_sa *t = &ipsec->tx_tbl[i];
+
+		if (r->used) {
+			if (r->mode & IXGBE_RXTXMOD_VF)
+				ixgbe_ipsec_del_sa(r->xs);
+			else
+				ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
+						      r->key, r->salt,
+						      r->mode, r->iptbl_ind);
+		}
+
+		if (t->used) {
+			if (t->mode & IXGBE_RXTXMOD_VF)
+				ixgbe_ipsec_del_sa(t->xs);
+			else
+				ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
+		}
+	}
+
 	/* reload the IP addrs */
 	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
 		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
 
 		if (ipsa->used)
 			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
-	}
-
-	/* reload the Rx and Tx keys */
-	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
-		struct rx_sa *rsa = &ipsec->rx_tbl[i];
-		struct tx_sa *tsa = &ipsec->tx_tbl[i];
-
-		if (rsa->used)
-			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
-					      rsa->key, rsa->salt,
-					      rsa->mode, rsa->iptbl_ind);
-
-		if (tsa->used)
-			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
 	}
 }
 
@@ -379,6 +399,8 @@
 	rcu_read_lock();
 	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
 				   (__force u32)spi) {
+		if (rsa->mode & IXGBE_RXTXMOD_VF)
+			continue;
 		if (spi == rsa->xs->id.spi &&
 		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
 		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -405,10 +427,9 @@
 static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
 					u32 *mykey, u32 *mysalt)
 {
-	struct net_device *dev = xs->xso.dev;
+	struct net_device *dev = xs->xso.real_dev;
 	unsigned char *key_data;
 	char *alg_name = NULL;
-	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
 	int key_len;
 
 	if (!xs->aead) {
@@ -436,9 +457,9 @@
 	 * we don't need to do any byteswapping.
 	 * 160 accounts for 16 byte key and 4 byte salt
 	 */
-	if (key_len == 160) {
+	if (key_len == IXGBE_IPSEC_KEY_BITS) {
 		*mysalt = ((u32 *)key_data)[4];
-	} else if (key_len != 128) {
+	} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
 		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
 		return -EINVAL;
 	} else {
@@ -456,7 +477,7 @@
  **/
 static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
 {
-	struct net_device *dev = xs->xso.dev;
+	struct net_device *dev = xs->xso.real_dev;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 mfval, manc, reg;
@@ -539,7 +560,7 @@
  **/
 static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 {
-	struct net_device *dev = xs->xso.dev;
+	struct net_device *dev = xs->xso.real_dev;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -551,6 +572,11 @@
 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
 		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
 			   xs->id.proto);
+		return -EINVAL;
+	}
+
+	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
+		netdev_err(dev, "Unsupported mode for ipsec offload\n");
 		return -EINVAL;
 	}
 
@@ -724,7 +750,7 @@
  **/
 static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
 {
-	struct net_device *dev = xs->xso.dev;
+	struct net_device *dev = xs->xso.real_dev;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -812,6 +838,227 @@
 };
 
 /**
+ * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
+ * @adapter: board private structure
+ * @vf: VF id to be removed
+ **/
+void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	int i;
+
+	if (!ipsec)
+		return;
+
+	/* search rx sa table */
+	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
+		if (!ipsec->rx_tbl[i].used)
+			continue;
+		if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
+		    ipsec->rx_tbl[i].vf == vf)
+			ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+	}
+
+	/* search tx sa table */
+	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
+		if (!ipsec->tx_tbl[i].used)
+			continue;
+		if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
+		    ipsec->tx_tbl[i].vf == vf)
+			ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+	}
+}
+
+/**
+ * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
+ * @adapter: board private structure
+ * @msgbuf: The message buffer
+ * @vf: the VF index
+ *
+ * Make up a new xs and algorithm info from the data sent by the VF.
+ * We only need to sketch in just enough to set up the HW offload.
+ * Put the resulting offload_handle into the return message to the VF.
+ *
+ * Returns 0 or error value
+ **/
+int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct xfrm_algo_desc *algo;
+	struct sa_mbx_msg *sam;
+	struct xfrm_state *xs;
+	size_t aead_len;
+	u16 sa_idx;
+	u32 pfsa;
+	int err;
+
+	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
+	if (!adapter->vfinfo[vf].trusted ||
+	    !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
+		e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
+		err = -EACCES;
+		goto err_out;
+	}
+
+	/* Tx IPsec offload doesn't seem to work on this
+	 * device, so block these requests for now.
+	 */
+	sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+	if (sam->flags != XFRM_OFFLOAD_INBOUND) {
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+	xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+	if (unlikely(!xs)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	xs->xso.flags = sam->flags;
+	xs->id.spi = sam->spi;
+	xs->id.proto = sam->proto;
+	xs->props.family = sam->family;
+	if (xs->props.family == AF_INET6)
+		memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
+	else
+		memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
+	xs->xso.dev = adapter->netdev;
+
+	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+	if (unlikely(!algo)) {
+		err = -ENOENT;
+		goto err_xs;
+	}
+
+	aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
+	xs->aead = kzalloc(aead_len, GFP_KERNEL);
+	if (unlikely(!xs->aead)) {
+		err = -ENOMEM;
+		goto err_xs;
+	}
+
+	xs->props.ealgo = algo->desc.sadb_alg_id;
+	xs->geniv = algo->uinfo.aead.geniv;
+	xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
+	xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
+	memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
+	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
+
+	/* set up the HW offload */
+	err = ixgbe_ipsec_add_sa(xs);
+	if (err)
+		goto err_aead;
+
+	pfsa = xs->xso.offload_handle;
+	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
+		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
+		ipsec->rx_tbl[sa_idx].vf = vf;
+		ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
+	} else {
+		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
+		ipsec->tx_tbl[sa_idx].vf = vf;
+		ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
+	}
+
+	msgbuf[1] = xs->xso.offload_handle;
+
+	return 0;
+
+err_aead:
+	kfree_sensitive(xs->aead);
+err_xs:
+	kfree_sensitive(xs);
+err_out:
+	msgbuf[1] = err;
+	return err;
+}
+
+/**
+ * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
+ * @adapter: board private structure
+ * @msgbuf: The message buffer
+ * @vf: the VF index
+ *
+ * Given the offload_handle sent by the VF, look for the related SA table
+ * entry and use its xs field to call for a delete of the SA.
+ *
+ * Note: We silently ignore requests to delete entries that are already
+ *       set to unused because when a VF is set to "DOWN", the PF first
+ *       gets a reset and clears all the VF's entries; then the VF's
+ *       XFRM stack sends individual deletes for each entry, which the
+ *       reset already removed. In the future it might be good to try to
+ *       optimize this so not so many unnecessary delete messages are sent.
+ *
+ * Returns 0 or error value
+ **/
+int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct xfrm_state *xs;
+	u32 pfsa = msgbuf[1];
+	u16 sa_idx;
+
+	if (!adapter->vfinfo[vf].trusted) {
+		e_err(drv, "vf %d attempted to delete an SA\n", vf);
+		return -EPERM;
+	}
+
+	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
+		struct rx_sa *rsa;
+
+		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
+		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
+			e_err(drv, "vf %d SA index %d out of range\n",
+			      vf, sa_idx);
+			return -EINVAL;
+		}
+
+		rsa = &ipsec->rx_tbl[sa_idx];
+
+		if (!rsa->used)
+			return 0;
+
+		if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
+		    rsa->vf != vf) {
+			e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
+			return -ENOENT;
+		}
+
+		xs = ipsec->rx_tbl[sa_idx].xs;
+	} else {
+		struct tx_sa *tsa;
+
+		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
+		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
+			e_err(drv, "vf %d SA index %d out of range\n",
+			      vf, sa_idx);
+			return -EINVAL;
+		}
+
+		tsa = &ipsec->tx_tbl[sa_idx];
+
+		if (!tsa->used)
+			return 0;
+
+		if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
+		    tsa->vf != vf) {
+			e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
+			return -ENOENT;
+		}
+
+		xs = ipsec->tx_tbl[sa_idx].xs;
+	}
+
+	ixgbe_ipsec_del_sa(xs);
+
+	/* remove the xs that was made-up in the add request */
+	kfree_sensitive(xs);
+
+	return 0;
+}
+
+/**
  * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
  * @tx_ring: outgoing context
  * @first: current data packet
@@ -824,11 +1071,13 @@
 	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
 	struct xfrm_state *xs;
+	struct sec_path *sp;
 	struct tx_sa *tsa;
 
-	if (unlikely(!first->skb->sp->len)) {
+	sp = skb_sec_path(first->skb);
+	if (unlikely(!sp->len)) {
 		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
-			   __func__, first->skb->sp->len);
+			   __func__, sp->len);
 		return 0;
 	}
 
@@ -918,6 +1167,7 @@
 	struct xfrm_state *xs = NULL;
 	struct ipv6hdr *ip6 = NULL;
 	struct iphdr *ip4 = NULL;
+	struct sec_path *sp;
 	void *daddr;
 	__be32 spi;
 	u8 *c_hdr;
@@ -957,12 +1207,12 @@
 	if (unlikely(!xs))
 		return;
 
-	skb->sp = secpath_dup(skb->sp);
-	if (unlikely(!skb->sp))
+	sp = secpath_set(skb);
+	if (unlikely(!sp))
 		return;
 
-	skb->sp->xvec[skb->sp->len++] = xs;
-	skb->sp->olen++;
+	sp->xvec[sp->len++] = xs;
+	sp->olen++;
 	xo = xfrm_offload(skb);
 	xo->flags = CRYPTO_DONE;
 	xo->status = CRYPTO_SUCCESS;
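
For reference on the new IXGBE_IPSEC_KEY_BITS constant used in ixgbe_ipsec_parse_proto_keys above: the rfc4106(gcm(aes)) key material handed down by the XFRM layer is either a bare 128-bit key or a 128-bit key followed by a 32-bit salt, which is where the 160-bit figure comes from; with only 128 bits the driver falls back to a salt of zero. Below is a rough standalone sketch of that split, outside the patch; the helper name, the plain C types, and the test values are illustrative only, not part of the driver.

/* Minimal sketch of the 128-bit-key + 32-bit-salt split behind
 * IXGBE_IPSEC_KEY_BITS (assumptions: helper name and types are made up
 * for illustration; the driver works on the xfrm aead key instead).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IPSEC_KEY_BITS 160	/* 16-byte AES-GCM key + 4-byte salt */

static int split_key_and_salt(const uint8_t *key_data, int key_len_bits,
			      uint8_t key[16], uint32_t *salt)
{
	if (key_len_bits == IPSEC_KEY_BITS) {
		/* 16-byte key followed by the 4-byte salt */
		memcpy(key, key_data, 16);
		memcpy(salt, key_data + 16, sizeof(*salt));
	} else if (key_len_bits == IPSEC_KEY_BITS - 32) {
		/* bare 128-bit key: no salt supplied, use zero */
		memcpy(key, key_data, 16);
		*salt = 0;
	} else {
		return -1;	/* unsupported key length */
	}
	return 0;
}

int main(void)
{
	uint8_t material[20] = { 1, 2, 3, 4, 5, 6, 7, 8,
				 9, 10, 11, 12, 13, 14, 15, 16,
				 0xaa, 0xbb, 0xcc, 0xdd };	/* key + salt */
	uint8_t key[16];
	uint32_t salt;

	if (!split_key_and_salt(material, 160, key, &salt))
		printf("salt bytes: %08x\n", (unsigned int)salt);
	return 0;
}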