| old | new | |
|---|---|---|
| .. | .. | |
| 15 | 15 | |
| 16 | 16 | #define RDS_IB_DEFAULT_RECV_WR 1024 |
| 17 | 17 | #define RDS_IB_DEFAULT_SEND_WR 256 |
| 18 | | -#define RDS_IB_DEFAULT_FR_WR 256 |
| 19 | | -#define RDS_IB_DEFAULT_FR_INV_WR 256 |
| | 18 | +#define RDS_IB_DEFAULT_FR_WR 512 |
| 20 | 19 | |
| 21 | 20 | #define RDS_IB_DEFAULT_RETRY_COUNT 1 |
| 22 | 21 | |
| .. | .. | |
| 67 | 66 | u8 ricpc_protocol_major; |
| 68 | 67 | u8 ricpc_protocol_minor; |
| 69 | 68 | __be16 ricpc_protocol_minor_mask; /* bitmask */ |
| 70 | | - __be32 ricpc_reserved1; |
| | 69 | + u8 ricpc_dp_toss; |
| | 70 | + u8 ripc_reserved1; |
| | 71 | + __be16 ripc_reserved2; |
| 71 | 72 | __be64 ricpc_ack_seq; |
| 72 | 73 | __be32 ricpc_credit; /* non-zero enables flow ctl */ |
| 73 | 74 | }; |
| .. | .. | |
| 155 | 156 | |
| 156 | 157 | /* To control the number of wrs from fastreg */ |
| 157 | 158 | atomic_t i_fastreg_wrs; |
| 158 | | - atomic_t i_fastunreg_wrs; |
| | 159 | + atomic_t i_fastreg_inuse_count; |
| 159 | 160 | |
| 160 | 161 | /* interrupt handling */ |
| 161 | 162 | struct tasklet_struct i_send_tasklet; |
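
Two of the changes above belong together: the separate 256-entry RDS_IB_DEFAULT_FR_WR and RDS_IB_DEFAULT_FR_INV_WR budgets collapse into a single RDS_IB_DEFAULT_FR_WR of 512, and i_fastunreg_wrs is replaced by i_fastreg_inuse_count next to the existing i_fastreg_wrs counter. Below is a minimal sketch, not taken from the RDS sources (the example_* names are hypothetical), of how a single shared atomic budget like this is typically reserved before posting a fast-registration work request and returned afterwards.

```c
/*
 * Minimal sketch (not the RDS implementation; the example_* names are
 * hypothetical): one atomic budget, seeded from the RDS_IB_DEFAULT_FR_WR
 * value in the hunk above, is reserved before a fast-registration work
 * request is posted and returned once its completion has been reaped.
 */
#include <linux/types.h>
#include <linux/atomic.h>

#define RDS_IB_DEFAULT_FR_WR	512	/* as in the hunk above */

static atomic_t example_fastreg_wrs = ATOMIC_INIT(RDS_IB_DEFAULT_FR_WR);

/* Reserve one WR slot; returns false when the budget is exhausted. */
static bool example_fastreg_get(void)
{
	return atomic_dec_if_positive(&example_fastreg_wrs) >= 0;
}

/* Give the slot back after the WR's completion has been processed. */
static void example_fastreg_put(void)
{
	atomic_inc(&example_fastreg_wrs);
}
```

atomic_dec_if_positive() never drives the budget below zero, so callers racing for the last slot simply see the reservation fail and can wait for completions to return credits.
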
| old | new | |
|---|---|---|
| .. | .. | |
| 164 | 165 | /* tx */ |
| 165 | 166 | struct rds_ib_work_ring i_send_ring; |
| 166 | 167 | struct rm_data_op *i_data_op; |
| 167 | | - struct rds_header *i_send_hdrs; |
| 168 | | - dma_addr_t i_send_hdrs_dma; |
| | 168 | + struct rds_header **i_send_hdrs; |
| | 169 | + dma_addr_t *i_send_hdrs_dma; |
| 169 | 170 | struct rds_ib_send_work *i_sends; |
| 170 | 171 | atomic_t i_signaled_sends; |
| 171 | 172 | |
| .. | .. | |
| 174 | 175 | struct rds_ib_work_ring i_recv_ring; |
| 175 | 176 | struct rds_ib_incoming *i_ibinc; |
| 176 | 177 | u32 i_recv_data_rem; |
| 177 | | - struct rds_header *i_recv_hdrs; |
| 178 | | - dma_addr_t i_recv_hdrs_dma; |
| | 178 | + struct rds_header **i_recv_hdrs; |
| | 179 | + dma_addr_t *i_recv_hdrs_dma; |
| 179 | 180 | struct rds_ib_recv_work *i_recvs; |
| 180 | 181 | u64 i_ack_recv; /* last ACK received */ |
| 181 | 182 | struct rds_ib_refill_cache i_cache_incs; |
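
These two hunks change i_send_hdrs and i_recv_hdrs from a pointer to a single header region into arrays of per-entry struct rds_header pointers, and i_send_hdrs_dma / i_recv_hdrs_dma into matching arrays of DMA addresses. The sketch below shows what allocating one DMA-coherent header per ring entry could look like with these types; example_alloc_hdrs and its error handling are illustrative, not the actual RDS allocator.

```c
/*
 * Sketch under the assumption that each ring entry now owns its own
 * DMA-coherent header: hdrs[i] holds the CPU pointer and dma[i] the
 * matching bus address. example_alloc_hdrs is hypothetical.
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>
#include "rds.h"		/* struct rds_header */

static int example_alloc_hdrs(struct ib_device *dev, u32 nr,
			      struct rds_header ***hdrs_ret,
			      dma_addr_t **dma_ret)
{
	struct rds_header **hdrs;
	dma_addr_t *dma;
	u32 i;

	hdrs = kcalloc(nr, sizeof(*hdrs), GFP_KERNEL);
	dma = kcalloc(nr, sizeof(*dma), GFP_KERNEL);
	if (!hdrs || !dma)
		goto err;

	/* One coherent allocation per ring slot, address recorded alongside. */
	for (i = 0; i < nr; i++) {
		hdrs[i] = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
						&dma[i], GFP_KERNEL);
		if (!hdrs[i])
			goto err_unwind;
	}

	*hdrs_ret = hdrs;
	*dma_ret = dma;
	return 0;

err_unwind:
	while (i--)
		ib_dma_free_coherent(dev, sizeof(struct rds_header),
				     hdrs[i], dma[i]);
err:
	kfree(hdrs);	/* kfree(NULL) is a no-op */
	kfree(dma);
	return -ENOMEM;
}
```

Teardown would walk the same arrays in reverse, freeing each coherent header before releasing the two arrays.
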
| old | new | |
|---|---|---|
| .. | .. | |
| 219 | 220 | /* Send/Recv vectors */ |
| 220 | 221 | int i_scq_vector; |
| 221 | 222 | int i_rcq_vector; |
| | 223 | + u8 i_sl; |
| 222 | 224 | }; |
| 223 | 225 | |
| 224 | 226 | /* This assumes that atomic_t is at least 32 bits */ |
| .. | .. | |
| 244 | 246 | struct list_head conn_list; |
| 245 | 247 | struct ib_device *dev; |
| 246 | 248 | struct ib_pd *pd; |
| 247 | | - bool use_fastreg; |
| | 249 | + u8 odp_capable:1; |
| 248 | 250 | |
| 249 | 251 | unsigned int max_mrs; |
| 250 | 252 | struct rds_ib_mr_pool *mr_1m_pool; |
| 251 | 253 | struct rds_ib_mr_pool *mr_8k_pool; |
| 252 | | - unsigned int fmr_max_remaps; |
| 253 | 254 | unsigned int max_8k_mrs; |
| 254 | 255 | unsigned int max_1m_mrs; |
| 255 | 256 | int max_sge; |
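
In the last hunk above, bool use_fastreg gives way to a u8 odp_capable:1 bit and the FMR-era fmr_max_remaps field disappears. For reference only, the sketch below shows one way On-Demand Paging support can be read from the verbs device attributes; whether odp_capable is gated on exactly this flag is not visible in this diff.

```c
/*
 * Hedged sketch: querying the device capability bit for On-Demand
 * Paging. The exact conditions used to set odp_capable are not part
 * of this hunk; example_dev_supports_odp is hypothetical.
 */
#include <linux/types.h>
#include <rdma/ib_verbs.h>

static bool example_dev_supports_odp(struct ib_device *dev)
{
	return !!(dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING);
}
```
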
| old | new | |
|---|---|---|
| .. | .. | |
| 262 | 263 | int *vector_load; |
| 263 | 264 | }; |
| 264 | 265 | |
| 265 | | -#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent) |
| 266 | 266 | #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) |
| 267 | 267 | |
| 268 | 268 | /* bits for i_ack_flags */ |
| .. | .. | |
| 331 | 331 | unsigned int i; |
| 332 | 332 | |
| 333 | 333 | for_each_sg(sglist, sg, sg_dma_len, i) { |
| 334 | | - ib_dma_sync_single_for_cpu(dev, |
| 335 | | - ib_sg_dma_address(dev, sg), |
| 336 | | - ib_sg_dma_len(dev, sg), |
| 337 | | - direction); |
| | 334 | + ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg), |
| | 335 | + sg_dma_len(sg), direction); |
| 338 | 336 | } |
| 339 | 337 | } |
| 340 | 338 | #define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu |
| .. | .. | |
| 348 | 346 | unsigned int i; |
| 349 | 347 | |
| 350 | 348 | for_each_sg(sglist, sg, sg_dma_len, i) { |
| 351 | | - ib_dma_sync_single_for_device(dev, |
| 352 | | - ib_sg_dma_address(dev, sg), |
| 353 | | - ib_sg_dma_len(dev, sg), |
| 354 | | - direction); |
| | 349 | + ib_dma_sync_single_for_device(dev, sg_dma_address(sg), |
| | 350 | + sg_dma_len(sg), direction); |
| 355 | 351 | } |
| 356 | 352 | } |
| 357 | 353 | #define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device |
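
Both sync helpers drop the removed ib_sg_dma_address() / ib_sg_dma_len() wrappers in favour of the generic sg_dma_address() / sg_dma_len() scatterlist accessors, which yield the same DMA address and length once the list has been mapped. A short usage sketch follows; example_read_frag and its DMA_FROM_DEVICE receive context are assumptions for illustration, not RDS code.

```c
/*
 * Usage sketch (example_read_frag is hypothetical): after a scatterlist
 * has been DMA-mapped, the wrappers above sync it for the CPU before the
 * data is read and hand ownership back to the device afterwards.
 */
#include "ib.h"		/* the RDS/IB header shown in this diff */

static void example_read_frag(struct ib_device *dev,
			      struct scatterlist *sg, unsigned int nents)
{
	/* Make device writes visible to the CPU. */
	rds_ib_dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

	/* ... inspect the received data here ... */

	/* Return ownership to the device for the next transfer. */
	rds_ib_dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
}
```
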
| old | new | |
|---|---|---|
| .. | .. | |
| 383 | 379 | int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6); |
| 384 | 380 | void rds_ib_cm_connect_complete(struct rds_connection *conn, |
| 385 | 381 | struct rdma_cm_event *event); |
| 386 | | - |
| 387 | 382 | |
| 388 | 383 | #define rds_ib_conn_error(conn, fmt...) \ |
| 389 | 384 | __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt) |