forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/mellanox/mlx4/mr.c
....@@ -966,189 +966,6 @@
966966 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
967967 }
968968
969
-static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
970
- int npages, u64 iova)
971
-{
972
- int i, page_mask;
973
-
974
- if (npages > fmr->max_pages)
975
- return -EINVAL;
976
-
977
- page_mask = (1 << fmr->page_shift) - 1;
978
-
979
- /* We are getting page lists, so va must be page aligned. */
980
- if (iova & page_mask)
981
- return -EINVAL;
982
-
983
- /* Trust the user not to pass misaligned data in page_list */
984
- if (0)
985
- for (i = 0; i < npages; ++i) {
986
- if (page_list[i] & ~page_mask)
987
- return -EINVAL;
988
- }
989
-
990
- if (fmr->maps >= fmr->max_maps)
991
- return -EINVAL;
992
-
993
- return 0;
994
-}
995
-
996
/*
 * Map a list of physical pages into an FMR and hand out fresh keys.
 *
 * The MPT is first taken into software ownership, the MTT entries are
 * written through the DMA-mapped table, the MPT key/length/start fields
 * are updated, and ownership is then handed back to the hardware.  The
 * wmb() calls enforce the ordering the HCA requires between these
 * steps — do not reorder any of them.
 *
 * Returns 0 on success or -EINVAL from the argument checks in
 * mlx4_check_fmr().  On success *lkey and *rkey receive the new key.
 */
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	/* Rotate the key index so stale references to the old key fail. */
	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	/* Take ownership of the MPT back from the hardware. */
	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key = cpu_to_be32(key);
	fmr->mpt->lkey = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
1042
-
1043
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
1044
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
1045
-{
1046
- struct mlx4_priv *priv = mlx4_priv(dev);
1047
- int err = -ENOMEM;
1048
-
1049
- if (max_maps > dev->caps.max_fmr_maps)
1050
- return -EINVAL;
1051
-
1052
- if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
1053
- return -EINVAL;
1054
-
1055
- /* All MTTs must fit in the same page */
1056
- if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
1057
- return -EINVAL;
1058
-
1059
- fmr->page_shift = page_shift;
1060
- fmr->max_pages = max_pages;
1061
- fmr->max_maps = max_maps;
1062
- fmr->maps = 0;
1063
-
1064
- err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
1065
- page_shift, &fmr->mr);
1066
- if (err)
1067
- return err;
1068
-
1069
- fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
1070
- fmr->mr.mtt.offset,
1071
- &fmr->dma_handle);
1072
-
1073
- if (!fmr->mtts) {
1074
- err = -ENOMEM;
1075
- goto err_free;
1076
- }
1077
-
1078
- return 0;
1079
-
1080
-err_free:
1081
- (void) mlx4_mr_free(dev, &fmr->mr);
1082
- return err;
1083
-}
1084
-EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
1085
-
1086
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
1087
-{
1088
- struct mlx4_priv *priv = mlx4_priv(dev);
1089
- int err;
1090
-
1091
- err = mlx4_mr_enable(dev, &fmr->mr);
1092
- if (err)
1093
- return err;
1094
-
1095
- fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
1096
- key_to_hw_index(fmr->mr.key), NULL);
1097
- if (!fmr->mpt)
1098
- return -ENOMEM;
1099
-
1100
- return 0;
1101
-}
1102
-EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
1103
-
1104
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1105
- u32 *lkey, u32 *rkey)
1106
-{
1107
- if (!fmr->maps)
1108
- return;
1109
-
1110
- /* To unmap: it is sufficient to take back ownership from HW */
1111
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
1112
-
1113
- /* Make sure MPT status is visible */
1114
- wmb();
1115
-
1116
- fmr->maps = 0;
1117
-}
1118
-EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
1119
-
1120
/*
 * Free an FMR and its underlying MR.
 *
 * Fails with -EBUSY while mappings are outstanding (callers must use
 * mlx4_fmr_unmap() first).  If the FMR was enabled, the MPT is handed
 * back to the hardware with length/start cleared so the HW2SW_MPT
 * command issued by mlx4_mr_free() can succeed; the wmb() fencing
 * around those writes is required and must not be reordered.
 *
 * Returns 0 on success or a negative errno from mlx4_mr_free().
 */
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;
	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
		/* In case of FMR was enabled and unmapped
		 * make sure to give ownership of MPT back to HW
		 * so HW2SW_MPT command will succeed.
		 */
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
		/* Make sure MPT status is visible before changing MPT fields */
		wmb();
		fmr->mpt->length = 0;
		fmr->mpt->start = 0;
		/* Make sure MPT data is visible after changing MPT status */
		wmb();
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
		/* make sure MPT status is visible */
		wmb();
	}

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
1151
-
1152969 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
1153970 {
1154971 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,