| .. | .. |
|---|
| 39 | 39 | #include <linux/netdevice.h> |
|---|
| 40 | 40 | #include <linux/sched/signal.h> |
|---|
| 41 | 41 | #include <linux/inetdevice.h> |
|---|
| 42 | +#include <linux/inet_diag.h> |
|---|
| 42 | 43 | |
|---|
| 44 | +#include <net/snmp.h> |
|---|
| 43 | 45 | #include <net/tls.h> |
|---|
| 46 | +#include <net/tls_toe.h> |
|---|
| 44 | 47 | |
|---|
| 45 | 48 | MODULE_AUTHOR("Mellanox Technologies"); |
|---|
| 46 | 49 | MODULE_DESCRIPTION("Transport Layer Security Support"); |
|---|
| .. | .. |
|---|
| 53 | 56 | TLS_NUM_PROTS, |
|---|
| 54 | 57 | }; |
|---|
| 55 | 58 | |
|---|
| 56 | | -static struct proto *saved_tcpv6_prot; |
|---|
| 59 | +static const struct proto *saved_tcpv6_prot; |
|---|
| 57 | 60 | static DEFINE_MUTEX(tcpv6_prot_mutex); |
|---|
| 58 | | -static LIST_HEAD(device_list); |
|---|
| 59 | | -static DEFINE_MUTEX(device_mutex); |
|---|
| 61 | +static const struct proto *saved_tcpv4_prot; |
|---|
| 62 | +static DEFINE_MUTEX(tcpv4_prot_mutex); |
|---|
| 60 | 63 | static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; |
|---|
| 61 | | -static struct proto_ops tls_sw_proto_ops; |
|---|
| 64 | +static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; |
|---|
| 65 | +static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], |
|---|
| 66 | + const struct proto *base); |
|---|
| 62 | 67 | |
|---|
| 63 | | -static void update_sk_prot(struct sock *sk, struct tls_context *ctx) |
|---|
| 68 | +void update_sk_prot(struct sock *sk, struct tls_context *ctx) |
|---|
| 64 | 69 | { |
|---|
| 65 | 70 | int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; |
|---|
| 66 | 71 | |
|---|
| 67 | | - sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]; |
|---|
| 72 | + WRITE_ONCE(sk->sk_prot, |
|---|
| 73 | + &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]); |
|---|
| 74 | + WRITE_ONCE(sk->sk_socket->ops, |
|---|
| 75 | + &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]); |
|---|
| 68 | 76 | } |
|---|
| 69 | 77 | |
|---|
| 70 | 78 | int wait_on_pending_writer(struct sock *sk, long *timeo) |
|---|
| .. | .. |
|---|
| 84 | 92 | break; |
|---|
| 85 | 93 | } |
|---|
| 86 | 94 | |
|---|
| 87 | | - if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait)) |
|---|
| 95 | + if (sk_wait_event(sk, timeo, |
|---|
| 96 | + !READ_ONCE(sk->sk_write_pending), &wait)) |
|---|
| 88 | 97 | break; |
|---|
| 89 | 98 | } |
|---|
| 90 | 99 | remove_wait_queue(sk_sleep(sk), &wait); |
|---|
| .. | .. |
|---|
| 141 | 150 | size = sg->length; |
|---|
| 142 | 151 | } |
|---|
| 143 | 152 | |
|---|
| 144 | | - clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); |
|---|
| 145 | 153 | ctx->in_tcp_sendpages = false; |
|---|
| 146 | | - ctx->sk_write_space(sk); |
|---|
| 147 | 154 | |
|---|
| 148 | 155 | return 0; |
|---|
| 149 | 156 | } |
|---|
| .. | .. |
|---|
| 193 | 200 | return rc; |
|---|
| 194 | 201 | } |
|---|
| 195 | 202 | |
|---|
| 196 | | -int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, |
|---|
| 197 | | - int flags, long *timeo) |
|---|
| 203 | +int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, |
|---|
| 204 | + int flags) |
|---|
| 198 | 205 | { |
|---|
| 199 | 206 | struct scatterlist *sg; |
|---|
| 200 | 207 | u16 offset; |
|---|
| 201 | | - |
|---|
| 202 | | - if (!tls_is_partially_sent_record(ctx)) |
|---|
| 203 | | - return ctx->push_pending_record(sk, flags); |
|---|
| 204 | 208 | |
|---|
| 205 | 209 | sg = ctx->partially_sent_record; |
|---|
| 206 | 210 | offset = ctx->partially_sent_offset; |
|---|
| 207 | 211 | |
|---|
| 208 | 212 | ctx->partially_sent_record = NULL; |
|---|
| 209 | 213 | return tls_push_sg(sk, ctx, sg, offset, flags); |
|---|
| 214 | +} |
|---|
| 215 | + |
|---|
| 216 | +void tls_free_partial_record(struct sock *sk, struct tls_context *ctx) |
|---|
| 217 | +{ |
|---|
| 218 | + struct scatterlist *sg; |
|---|
| 219 | + |
|---|
| 220 | + for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) { |
|---|
| 221 | + put_page(sg_page(sg)); |
|---|
| 222 | + sk_mem_uncharge(sk, sg->length); |
|---|
| 223 | + } |
|---|
| 224 | + ctx->partially_sent_record = NULL; |
|---|
| 210 | 225 | } |
|---|
| 211 | 226 | |
|---|
| 212 | 227 | static void tls_write_space(struct sock *sk) |
|---|
| .. | .. |
|---|
| 222 | 237 | return; |
|---|
| 223 | 238 | } |
|---|
| 224 | 239 | |
|---|
| 225 | | - if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { |
|---|
| 226 | | - gfp_t sk_allocation = sk->sk_allocation; |
|---|
| 227 | | - int rc; |
|---|
| 228 | | - long timeo = 0; |
|---|
| 229 | | - |
|---|
| 230 | | - sk->sk_allocation = GFP_ATOMIC; |
|---|
| 231 | | - rc = tls_push_pending_closed_record(sk, ctx, |
|---|
| 232 | | - MSG_DONTWAIT | |
|---|
| 233 | | - MSG_NOSIGNAL, |
|---|
| 234 | | - &timeo); |
|---|
| 235 | | - sk->sk_allocation = sk_allocation; |
|---|
| 236 | | - |
|---|
| 237 | | - if (rc < 0) |
|---|
| 238 | | - return; |
|---|
| 239 | | - } |
|---|
| 240 | +#ifdef CONFIG_TLS_DEVICE |
|---|
| 241 | + if (ctx->tx_conf == TLS_HW) |
|---|
| 242 | + tls_device_write_space(sk, ctx); |
|---|
| 243 | + else |
|---|
| 244 | +#endif |
|---|
| 245 | + tls_sw_write_space(sk, ctx); |
|---|
| 240 | 246 | |
|---|
| 241 | 247 | ctx->sk_write_space(sk); |
|---|
| 242 | 248 | } |
|---|
| 243 | 249 | |
|---|
| 244 | | -void tls_ctx_free(struct tls_context *ctx) |
|---|
| 250 | +/** |
|---|
| 251 | + * tls_ctx_free() - free TLS ULP context |
|---|
| 252 | + * @sk: socket to which @ctx is attached |
|---|
| 253 | + * @ctx: TLS context structure |
|---|
| 254 | + * |
|---|
| 255 | + * Free TLS context. If @sk is %NULL caller guarantees that the socket |
|---|
| 256 | + * to which @ctx was attached has no outstanding references. |
|---|
| 257 | + */ |
|---|
| 258 | +void tls_ctx_free(struct sock *sk, struct tls_context *ctx) |
|---|
| 245 | 259 | { |
|---|
| 246 | 260 | if (!ctx) |
|---|
| 247 | 261 | return; |
|---|
| 248 | 262 | |
|---|
| 249 | 263 | memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); |
|---|
| 250 | 264 | memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); |
|---|
| 251 | | - kfree(ctx); |
|---|
| 265 | + mutex_destroy(&ctx->tx_lock); |
|---|
| 266 | + |
|---|
| 267 | + if (sk) |
|---|
| 268 | + kfree_rcu(ctx, rcu); |
|---|
| 269 | + else |
|---|
| 270 | + kfree(ctx); |
|---|
| 252 | 271 | } |
|---|
| 253 | 272 | |
|---|
| 254 | | -static void tls_sk_proto_close(struct sock *sk, long timeout) |
|---|
| 273 | +static void tls_sk_proto_cleanup(struct sock *sk, |
|---|
| 274 | + struct tls_context *ctx, long timeo) |
|---|
| 255 | 275 | { |
|---|
| 256 | | - struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 257 | | - long timeo = sock_sndtimeo(sk, 0); |
|---|
| 258 | | - void (*sk_proto_close)(struct sock *sk, long timeout); |
|---|
| 259 | | - bool free_ctx = false; |
|---|
| 260 | | - |
|---|
| 261 | | - lock_sock(sk); |
|---|
| 262 | | - sk_proto_close = ctx->sk_proto_close; |
|---|
| 263 | | - |
|---|
| 264 | | - if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) || |
|---|
| 265 | | - (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) { |
|---|
| 266 | | - free_ctx = true; |
|---|
| 267 | | - goto skip_tx_cleanup; |
|---|
| 268 | | - } |
|---|
| 269 | | - |
|---|
| 270 | | - if (!tls_complete_pending_work(sk, ctx, 0, &timeo)) |
|---|
| 276 | + if (unlikely(sk->sk_write_pending) && |
|---|
| 277 | + !wait_on_pending_writer(sk, &timeo)) |
|---|
| 271 | 278 | tls_handle_open_record(sk, 0); |
|---|
| 272 | | - |
|---|
| 273 | | - if (ctx->partially_sent_record) { |
|---|
| 274 | | - struct scatterlist *sg = ctx->partially_sent_record; |
|---|
| 275 | | - |
|---|
| 276 | | - while (1) { |
|---|
| 277 | | - put_page(sg_page(sg)); |
|---|
| 278 | | - sk_mem_uncharge(sk, sg->length); |
|---|
| 279 | | - |
|---|
| 280 | | - if (sg_is_last(sg)) |
|---|
| 281 | | - break; |
|---|
| 282 | | - sg++; |
|---|
| 283 | | - } |
|---|
| 284 | | - } |
|---|
| 285 | 279 | |
|---|
| 286 | 280 | /* We need these for tls_sw_fallback handling of other packets */ |
|---|
| 287 | 281 | if (ctx->tx_conf == TLS_SW) { |
|---|
| 288 | 282 | kfree(ctx->tx.rec_seq); |
|---|
| 289 | 283 | kfree(ctx->tx.iv); |
|---|
| 290 | | - tls_sw_free_resources_tx(sk); |
|---|
| 284 | + tls_sw_release_resources_tx(sk); |
|---|
| 285 | + TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW); |
|---|
| 286 | + } else if (ctx->tx_conf == TLS_HW) { |
|---|
| 287 | + tls_device_free_resources_tx(sk); |
|---|
| 288 | + TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE); |
|---|
| 291 | 289 | } |
|---|
| 292 | 290 | |
|---|
| 293 | | - if (ctx->rx_conf == TLS_SW) |
|---|
| 294 | | - tls_sw_free_resources_rx(sk); |
|---|
| 295 | | - |
|---|
| 296 | | -#ifdef CONFIG_TLS_DEVICE |
|---|
| 297 | | - if (ctx->rx_conf == TLS_HW) |
|---|
| 291 | + if (ctx->rx_conf == TLS_SW) { |
|---|
| 292 | + tls_sw_release_resources_rx(sk); |
|---|
| 293 | + TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW); |
|---|
| 294 | + } else if (ctx->rx_conf == TLS_HW) { |
|---|
| 298 | 295 | tls_device_offload_cleanup_rx(sk); |
|---|
| 299 | | - |
|---|
| 300 | | - if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) { |
|---|
| 301 | | -#else |
|---|
| 302 | | - { |
|---|
| 303 | | -#endif |
|---|
| 304 | | - if (sk->sk_write_space == tls_write_space) |
|---|
| 305 | | - sk->sk_write_space = ctx->sk_write_space; |
|---|
| 306 | | - tls_ctx_free(ctx); |
|---|
| 307 | | - ctx = NULL; |
|---|
| 296 | + TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE); |
|---|
| 308 | 297 | } |
|---|
| 309 | | - |
|---|
| 310 | | -skip_tx_cleanup: |
|---|
| 311 | | - release_sock(sk); |
|---|
| 312 | | - sk_proto_close(sk, timeout); |
|---|
| 313 | | - /* free ctx for TLS_HW_RECORD, used by tcp_set_state |
|---|
| 314 | | - * for sk->sk_prot->unhash [tls_hw_unhash] |
|---|
| 315 | | - */ |
|---|
| 316 | | - if (free_ctx) |
|---|
| 317 | | - tls_ctx_free(ctx); |
|---|
| 318 | 298 | } |
|---|
| 319 | 299 | |
|---|
| 320 | | -static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, |
|---|
| 321 | | - int __user *optlen) |
|---|
| 300 | +static void tls_sk_proto_close(struct sock *sk, long timeout) |
|---|
| 301 | +{ |
|---|
| 302 | + struct inet_connection_sock *icsk = inet_csk(sk); |
|---|
| 303 | + struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 304 | + long timeo = sock_sndtimeo(sk, 0); |
|---|
| 305 | + bool free_ctx; |
|---|
| 306 | + |
|---|
| 307 | + if (ctx->tx_conf == TLS_SW) |
|---|
| 308 | + tls_sw_cancel_work_tx(ctx); |
|---|
| 309 | + |
|---|
| 310 | + lock_sock(sk); |
|---|
| 311 | + free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW; |
|---|
| 312 | + |
|---|
| 313 | + if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE) |
|---|
| 314 | + tls_sk_proto_cleanup(sk, ctx, timeo); |
|---|
| 315 | + |
|---|
| 316 | + write_lock_bh(&sk->sk_callback_lock); |
|---|
| 317 | + if (free_ctx) |
|---|
| 318 | + rcu_assign_pointer(icsk->icsk_ulp_data, NULL); |
|---|
| 319 | + WRITE_ONCE(sk->sk_prot, ctx->sk_proto); |
|---|
| 320 | + if (sk->sk_write_space == tls_write_space) |
|---|
| 321 | + sk->sk_write_space = ctx->sk_write_space; |
|---|
| 322 | + write_unlock_bh(&sk->sk_callback_lock); |
|---|
| 323 | + release_sock(sk); |
|---|
| 324 | + if (ctx->tx_conf == TLS_SW) |
|---|
| 325 | + tls_sw_free_ctx_tx(ctx); |
|---|
| 326 | + if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) |
|---|
| 327 | + tls_sw_strparser_done(ctx); |
|---|
| 328 | + if (ctx->rx_conf == TLS_SW) |
|---|
| 329 | + tls_sw_free_ctx_rx(ctx); |
|---|
| 330 | + ctx->sk_proto->close(sk, timeout); |
|---|
| 331 | + |
|---|
| 332 | + if (free_ctx) |
|---|
| 333 | + tls_ctx_free(sk, ctx); |
|---|
| 334 | +} |
|---|
| 335 | + |
|---|
| 336 | +static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, |
|---|
| 337 | + int __user *optlen, int tx) |
|---|
| 322 | 338 | { |
|---|
| 323 | 339 | int rc = 0; |
|---|
| 324 | 340 | struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 325 | 341 | struct tls_crypto_info *crypto_info; |
|---|
| 342 | + struct cipher_context *cctx; |
|---|
| 326 | 343 | int len; |
|---|
| 327 | 344 | |
|---|
| 328 | 345 | if (get_user(len, optlen)) |
|---|
| .. | .. |
|---|
| 339 | 356 | } |
|---|
| 340 | 357 | |
|---|
| 341 | 358 | /* get user crypto info */ |
|---|
| 342 | | - crypto_info = &ctx->crypto_send.info; |
|---|
| 359 | + if (tx) { |
|---|
| 360 | + crypto_info = &ctx->crypto_send.info; |
|---|
| 361 | + cctx = &ctx->tx; |
|---|
| 362 | + } else { |
|---|
| 363 | + crypto_info = &ctx->crypto_recv.info; |
|---|
| 364 | + cctx = &ctx->rx; |
|---|
| 365 | + } |
|---|
| 343 | 366 | |
|---|
| 344 | 367 | if (!TLS_CRYPTO_INFO_READY(crypto_info)) { |
|---|
| 345 | 368 | rc = -EBUSY; |
|---|
| .. | .. |
|---|
| 364 | 387 | rc = -EINVAL; |
|---|
| 365 | 388 | goto out; |
|---|
| 366 | 389 | } |
|---|
| 367 | | - lock_sock(sk); |
|---|
| 368 | 390 | memcpy(crypto_info_aes_gcm_128->iv, |
|---|
| 369 | | - ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, |
|---|
| 391 | + cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, |
|---|
| 370 | 392 | TLS_CIPHER_AES_GCM_128_IV_SIZE); |
|---|
| 371 | | - memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq, |
|---|
| 393 | + memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq, |
|---|
| 372 | 394 | TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); |
|---|
| 373 | | - release_sock(sk); |
|---|
| 374 | 395 | if (copy_to_user(optval, |
|---|
| 375 | 396 | crypto_info_aes_gcm_128, |
|---|
| 376 | 397 | sizeof(*crypto_info_aes_gcm_128))) |
|---|
| 398 | + rc = -EFAULT; |
|---|
| 399 | + break; |
|---|
| 400 | + } |
|---|
| 401 | + case TLS_CIPHER_AES_GCM_256: { |
|---|
| 402 | + struct tls12_crypto_info_aes_gcm_256 * |
|---|
| 403 | + crypto_info_aes_gcm_256 = |
|---|
| 404 | + container_of(crypto_info, |
|---|
| 405 | + struct tls12_crypto_info_aes_gcm_256, |
|---|
| 406 | + info); |
|---|
| 407 | + |
|---|
| 408 | + if (len != sizeof(*crypto_info_aes_gcm_256)) { |
|---|
| 409 | + rc = -EINVAL; |
|---|
| 410 | + goto out; |
|---|
| 411 | + } |
|---|
| 412 | + memcpy(crypto_info_aes_gcm_256->iv, |
|---|
| 413 | + cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE, |
|---|
| 414 | + TLS_CIPHER_AES_GCM_256_IV_SIZE); |
|---|
| 415 | + memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq, |
|---|
| 416 | + TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE); |
|---|
| 417 | + if (copy_to_user(optval, |
|---|
| 418 | + crypto_info_aes_gcm_256, |
|---|
| 419 | + sizeof(*crypto_info_aes_gcm_256))) |
|---|
| 377 | 420 | rc = -EFAULT; |
|---|
| 378 | 421 | break; |
|---|
| 379 | 422 | } |
|---|
| .. | .. |
|---|
| 390 | 433 | { |
|---|
| 391 | 434 | int rc = 0; |
|---|
| 392 | 435 | |
|---|
| 436 | + lock_sock(sk); |
|---|
| 437 | + |
|---|
| 393 | 438 | switch (optname) { |
|---|
| 394 | 439 | case TLS_TX: |
|---|
| 395 | | - rc = do_tls_getsockopt_tx(sk, optval, optlen); |
|---|
| 440 | + case TLS_RX: |
|---|
| 441 | + rc = do_tls_getsockopt_conf(sk, optval, optlen, |
|---|
| 442 | + optname == TLS_TX); |
|---|
| 396 | 443 | break; |
|---|
| 397 | 444 | default: |
|---|
| 398 | 445 | rc = -ENOPROTOOPT; |
|---|
| 399 | 446 | break; |
|---|
| 400 | 447 | } |
|---|
| 448 | + |
|---|
| 449 | + release_sock(sk); |
|---|
| 450 | + |
|---|
| 401 | 451 | return rc; |
|---|
| 402 | 452 | } |
|---|
| 403 | 453 | |
|---|
| .. | .. |
|---|
| 407 | 457 | struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 408 | 458 | |
|---|
| 409 | 459 | if (level != SOL_TLS) |
|---|
| 410 | | - return ctx->getsockopt(sk, level, optname, optval, optlen); |
|---|
| 460 | + return ctx->sk_proto->getsockopt(sk, level, |
|---|
| 461 | + optname, optval, optlen); |
|---|
| 411 | 462 | |
|---|
| 412 | 463 | return do_tls_getsockopt(sk, optname, optval, optlen); |
|---|
| 413 | 464 | } |
|---|
| 414 | 465 | |
|---|
| 415 | | -static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, |
|---|
| 466 | +static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, |
|---|
| 416 | 467 | unsigned int optlen, int tx) |
|---|
| 417 | 468 | { |
|---|
| 418 | 469 | struct tls_crypto_info *crypto_info; |
|---|
| 470 | + struct tls_crypto_info *alt_crypto_info; |
|---|
| 419 | 471 | struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 472 | + size_t optsize; |
|---|
| 420 | 473 | int rc = 0; |
|---|
| 421 | 474 | int conf; |
|---|
| 422 | 475 | |
|---|
| 423 | | - if (!optval || (optlen < sizeof(*crypto_info))) { |
|---|
| 476 | + if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) { |
|---|
| 424 | 477 | rc = -EINVAL; |
|---|
| 425 | 478 | goto out; |
|---|
| 426 | 479 | } |
|---|
| 427 | 480 | |
|---|
| 428 | | - if (tx) |
|---|
| 481 | + if (tx) { |
|---|
| 429 | 482 | crypto_info = &ctx->crypto_send.info; |
|---|
| 430 | | - else |
|---|
| 483 | + alt_crypto_info = &ctx->crypto_recv.info; |
|---|
| 484 | + } else { |
|---|
| 431 | 485 | crypto_info = &ctx->crypto_recv.info; |
|---|
| 486 | + alt_crypto_info = &ctx->crypto_send.info; |
|---|
| 487 | + } |
|---|
| 432 | 488 | |
|---|
| 433 | 489 | /* Currently we don't support set crypto info more than one time */ |
|---|
| 434 | 490 | if (TLS_CRYPTO_INFO_READY(crypto_info)) { |
|---|
| .. | .. |
|---|
| 436 | 492 | goto out; |
|---|
| 437 | 493 | } |
|---|
| 438 | 494 | |
|---|
| 439 | | - rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); |
|---|
| 495 | + rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info)); |
|---|
| 440 | 496 | if (rc) { |
|---|
| 441 | 497 | rc = -EFAULT; |
|---|
| 442 | 498 | goto err_crypto_info; |
|---|
| 443 | 499 | } |
|---|
| 444 | 500 | |
|---|
| 445 | 501 | /* check version */ |
|---|
| 446 | | - if (crypto_info->version != TLS_1_2_VERSION) { |
|---|
| 447 | | - rc = -ENOTSUPP; |
|---|
| 502 | + if (crypto_info->version != TLS_1_2_VERSION && |
|---|
| 503 | + crypto_info->version != TLS_1_3_VERSION) { |
|---|
| 504 | + rc = -EINVAL; |
|---|
| 448 | 505 | goto err_crypto_info; |
|---|
| 449 | 506 | } |
|---|
| 450 | 507 | |
|---|
| 451 | | - switch (crypto_info->cipher_type) { |
|---|
| 452 | | - case TLS_CIPHER_AES_GCM_128: { |
|---|
| 453 | | - if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) { |
|---|
| 508 | + /* Ensure that TLS version and ciphers are same in both directions */ |
|---|
| 509 | + if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) { |
|---|
| 510 | + if (alt_crypto_info->version != crypto_info->version || |
|---|
| 511 | + alt_crypto_info->cipher_type != crypto_info->cipher_type) { |
|---|
| 454 | 512 | rc = -EINVAL; |
|---|
| 455 | 513 | goto err_crypto_info; |
|---|
| 456 | 514 | } |
|---|
| 457 | | - rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info), |
|---|
| 458 | | - optlen - sizeof(*crypto_info)); |
|---|
| 459 | | - if (rc) { |
|---|
| 460 | | - rc = -EFAULT; |
|---|
| 461 | | - goto err_crypto_info; |
|---|
| 462 | | - } |
|---|
| 515 | + } |
|---|
| 516 | + |
|---|
| 517 | + switch (crypto_info->cipher_type) { |
|---|
| 518 | + case TLS_CIPHER_AES_GCM_128: |
|---|
| 519 | + optsize = sizeof(struct tls12_crypto_info_aes_gcm_128); |
|---|
| 520 | + break; |
|---|
| 521 | + case TLS_CIPHER_AES_GCM_256: { |
|---|
| 522 | + optsize = sizeof(struct tls12_crypto_info_aes_gcm_256); |
|---|
| 463 | 523 | break; |
|---|
| 464 | 524 | } |
|---|
| 525 | + case TLS_CIPHER_AES_CCM_128: |
|---|
| 526 | + optsize = sizeof(struct tls12_crypto_info_aes_ccm_128); |
|---|
| 527 | + break; |
|---|
| 465 | 528 | default: |
|---|
| 466 | 529 | rc = -EINVAL; |
|---|
| 467 | 530 | goto err_crypto_info; |
|---|
| 468 | 531 | } |
|---|
| 469 | 532 | |
|---|
| 533 | + if (optlen != optsize) { |
|---|
| 534 | + rc = -EINVAL; |
|---|
| 535 | + goto err_crypto_info; |
|---|
| 536 | + } |
|---|
| 537 | + |
|---|
| 538 | + rc = copy_from_sockptr_offset(crypto_info + 1, optval, |
|---|
| 539 | + sizeof(*crypto_info), |
|---|
| 540 | + optlen - sizeof(*crypto_info)); |
|---|
| 541 | + if (rc) { |
|---|
| 542 | + rc = -EFAULT; |
|---|
| 543 | + goto err_crypto_info; |
|---|
| 544 | + } |
|---|
| 545 | + |
|---|
| 470 | 546 | if (tx) { |
|---|
| 471 | | -#ifdef CONFIG_TLS_DEVICE |
|---|
| 472 | 547 | rc = tls_set_device_offload(sk, ctx); |
|---|
| 473 | 548 | conf = TLS_HW; |
|---|
| 474 | | - if (rc) { |
|---|
| 475 | | -#else |
|---|
| 476 | | - { |
|---|
| 477 | | -#endif |
|---|
| 549 | + if (!rc) { |
|---|
| 550 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE); |
|---|
| 551 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE); |
|---|
| 552 | + } else { |
|---|
| 478 | 553 | rc = tls_set_sw_offload(sk, ctx, 1); |
|---|
| 554 | + if (rc) |
|---|
| 555 | + goto err_crypto_info; |
|---|
| 556 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW); |
|---|
| 557 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW); |
|---|
| 479 | 558 | conf = TLS_SW; |
|---|
| 480 | 559 | } |
|---|
| 481 | 560 | } else { |
|---|
| 482 | | -#ifdef CONFIG_TLS_DEVICE |
|---|
| 483 | 561 | rc = tls_set_device_offload_rx(sk, ctx); |
|---|
| 484 | 562 | conf = TLS_HW; |
|---|
| 485 | | - if (rc) { |
|---|
| 486 | | -#else |
|---|
| 487 | | - { |
|---|
| 488 | | -#endif |
|---|
| 563 | + if (!rc) { |
|---|
| 564 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE); |
|---|
| 565 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE); |
|---|
| 566 | + } else { |
|---|
| 489 | 567 | rc = tls_set_sw_offload(sk, ctx, 0); |
|---|
| 568 | + if (rc) |
|---|
| 569 | + goto err_crypto_info; |
|---|
| 570 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW); |
|---|
| 571 | + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW); |
|---|
| 490 | 572 | conf = TLS_SW; |
|---|
| 491 | 573 | } |
|---|
| 574 | + tls_sw_strparser_arm(sk, ctx); |
|---|
| 492 | 575 | } |
|---|
| 493 | | - |
|---|
| 494 | | - if (rc) |
|---|
| 495 | | - goto err_crypto_info; |
|---|
| 496 | 576 | |
|---|
| 497 | 577 | if (tx) |
|---|
| 498 | 578 | ctx->tx_conf = conf; |
|---|
| .. | .. |
|---|
| 502 | 582 | if (tx) { |
|---|
| 503 | 583 | ctx->sk_write_space = sk->sk_write_space; |
|---|
| 504 | 584 | sk->sk_write_space = tls_write_space; |
|---|
| 505 | | - } else { |
|---|
| 506 | | - sk->sk_socket->ops = &tls_sw_proto_ops; |
|---|
| 507 | 585 | } |
|---|
| 508 | 586 | goto out; |
|---|
| 509 | 587 | |
|---|
| .. | .. |
|---|
| 513 | 591 | return rc; |
|---|
| 514 | 592 | } |
|---|
| 515 | 593 | |
|---|
| 516 | | -static int do_tls_setsockopt(struct sock *sk, int optname, |
|---|
| 517 | | - char __user *optval, unsigned int optlen) |
|---|
| 594 | +static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, |
|---|
| 595 | + unsigned int optlen) |
|---|
| 518 | 596 | { |
|---|
| 519 | 597 | int rc = 0; |
|---|
| 520 | 598 | |
|---|
| .. | .. |
|---|
| 534 | 612 | } |
|---|
| 535 | 613 | |
|---|
| 536 | 614 | static int tls_setsockopt(struct sock *sk, int level, int optname, |
|---|
| 537 | | - char __user *optval, unsigned int optlen) |
|---|
| 615 | + sockptr_t optval, unsigned int optlen) |
|---|
| 538 | 616 | { |
|---|
| 539 | 617 | struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 540 | 618 | |
|---|
| 541 | 619 | if (level != SOL_TLS) |
|---|
| 542 | | - return ctx->setsockopt(sk, level, optname, optval, optlen); |
|---|
| 620 | + return ctx->sk_proto->setsockopt(sk, level, optname, optval, |
|---|
| 621 | + optlen); |
|---|
| 543 | 622 | |
|---|
| 544 | 623 | return do_tls_setsockopt(sk, optname, optval, optlen); |
|---|
| 545 | 624 | } |
|---|
| 546 | 625 | |
|---|
| 547 | | -static struct tls_context *create_ctx(struct sock *sk) |
|---|
| 626 | +struct tls_context *tls_ctx_create(struct sock *sk) |
|---|
| 548 | 627 | { |
|---|
| 549 | 628 | struct inet_connection_sock *icsk = inet_csk(sk); |
|---|
| 550 | 629 | struct tls_context *ctx; |
|---|
| .. | .. |
|---|
| 553 | 632 | if (!ctx) |
|---|
| 554 | 633 | return NULL; |
|---|
| 555 | 634 | |
|---|
| 556 | | - icsk->icsk_ulp_data = ctx; |
|---|
| 557 | | - ctx->setsockopt = sk->sk_prot->setsockopt; |
|---|
| 558 | | - ctx->getsockopt = sk->sk_prot->getsockopt; |
|---|
| 559 | | - ctx->sk_proto_close = sk->sk_prot->close; |
|---|
| 635 | + mutex_init(&ctx->tx_lock); |
|---|
| 636 | + rcu_assign_pointer(icsk->icsk_ulp_data, ctx); |
|---|
| 637 | + ctx->sk_proto = READ_ONCE(sk->sk_prot); |
|---|
| 638 | + ctx->sk = sk; |
|---|
| 560 | 639 | return ctx; |
|---|
| 561 | 640 | } |
|---|
| 562 | 641 | |
|---|
| 563 | | -static int tls_hw_prot(struct sock *sk) |
|---|
| 642 | +static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG], |
|---|
| 643 | + const struct proto_ops *base) |
|---|
| 564 | 644 | { |
|---|
| 565 | | - struct tls_context *ctx; |
|---|
| 566 | | - struct tls_device *dev; |
|---|
| 567 | | - int rc = 0; |
|---|
| 645 | + ops[TLS_BASE][TLS_BASE] = *base; |
|---|
| 568 | 646 | |
|---|
| 569 | | - mutex_lock(&device_mutex); |
|---|
| 570 | | - list_for_each_entry(dev, &device_list, dev_list) { |
|---|
| 571 | | - if (dev->feature && dev->feature(dev)) { |
|---|
| 572 | | - ctx = create_ctx(sk); |
|---|
| 573 | | - if (!ctx) |
|---|
| 574 | | - goto out; |
|---|
| 647 | + ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; |
|---|
| 648 | + ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; |
|---|
| 575 | 649 | |
|---|
| 576 | | - ctx->hash = sk->sk_prot->hash; |
|---|
| 577 | | - ctx->unhash = sk->sk_prot->unhash; |
|---|
| 578 | | - ctx->sk_proto_close = sk->sk_prot->close; |
|---|
| 579 | | - ctx->rx_conf = TLS_HW_RECORD; |
|---|
| 580 | | - ctx->tx_conf = TLS_HW_RECORD; |
|---|
| 581 | | - update_sk_prot(sk, ctx); |
|---|
| 582 | | - rc = 1; |
|---|
| 583 | | - break; |
|---|
| 650 | + ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; |
|---|
| 651 | + ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read; |
|---|
| 652 | + |
|---|
| 653 | + ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE]; |
|---|
| 654 | + ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read; |
|---|
| 655 | + |
|---|
| 656 | +#ifdef CONFIG_TLS_DEVICE |
|---|
| 657 | + ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; |
|---|
| 658 | + ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL; |
|---|
| 659 | + |
|---|
| 660 | + ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ]; |
|---|
| 661 | + ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL; |
|---|
| 662 | + |
|---|
| 663 | + ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ]; |
|---|
| 664 | + |
|---|
| 665 | + ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ]; |
|---|
| 666 | + |
|---|
| 667 | + ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ]; |
|---|
| 668 | + ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL; |
|---|
| 669 | +#endif |
|---|
| 670 | +#ifdef CONFIG_TLS_TOE |
|---|
| 671 | + ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base; |
|---|
| 672 | +#endif |
|---|
| 673 | +} |
|---|
| 674 | + |
|---|
| 675 | +static void tls_build_proto(struct sock *sk) |
|---|
| 676 | +{ |
|---|
| 677 | + int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; |
|---|
| 678 | + struct proto *prot = READ_ONCE(sk->sk_prot); |
|---|
| 679 | + |
|---|
| 680 | + /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */ |
|---|
| 681 | + if (ip_ver == TLSV6 && |
|---|
| 682 | + unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) { |
|---|
| 683 | + mutex_lock(&tcpv6_prot_mutex); |
|---|
| 684 | + if (likely(prot != saved_tcpv6_prot)) { |
|---|
| 685 | + build_protos(tls_prots[TLSV6], prot); |
|---|
| 686 | + build_proto_ops(tls_proto_ops[TLSV6], |
|---|
| 687 | + sk->sk_socket->ops); |
|---|
| 688 | + smp_store_release(&saved_tcpv6_prot, prot); |
|---|
| 584 | 689 | } |
|---|
| 690 | + mutex_unlock(&tcpv6_prot_mutex); |
|---|
| 585 | 691 | } |
|---|
| 586 | | -out: |
|---|
| 587 | | - mutex_unlock(&device_mutex); |
|---|
| 588 | | - return rc; |
|---|
| 589 | | -} |
|---|
| 590 | 692 | |
|---|
| 591 | | -static void tls_hw_unhash(struct sock *sk) |
|---|
| 592 | | -{ |
|---|
| 593 | | - struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 594 | | - struct tls_device *dev; |
|---|
| 595 | | - |
|---|
| 596 | | - mutex_lock(&device_mutex); |
|---|
| 597 | | - list_for_each_entry(dev, &device_list, dev_list) { |
|---|
| 598 | | - if (dev->unhash) |
|---|
| 599 | | - dev->unhash(dev, sk); |
|---|
| 693 | + if (ip_ver == TLSV4 && |
|---|
| 694 | + unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) { |
|---|
| 695 | + mutex_lock(&tcpv4_prot_mutex); |
|---|
| 696 | + if (likely(prot != saved_tcpv4_prot)) { |
|---|
| 697 | + build_protos(tls_prots[TLSV4], prot); |
|---|
| 698 | + build_proto_ops(tls_proto_ops[TLSV4], |
|---|
| 699 | + sk->sk_socket->ops); |
|---|
| 700 | + smp_store_release(&saved_tcpv4_prot, prot); |
|---|
| 701 | + } |
|---|
| 702 | + mutex_unlock(&tcpv4_prot_mutex); |
|---|
| 600 | 703 | } |
|---|
| 601 | | - mutex_unlock(&device_mutex); |
|---|
| 602 | | - ctx->unhash(sk); |
|---|
| 603 | | -} |
|---|
| 604 | | - |
|---|
| 605 | | -static int tls_hw_hash(struct sock *sk) |
|---|
| 606 | | -{ |
|---|
| 607 | | - struct tls_context *ctx = tls_get_ctx(sk); |
|---|
| 608 | | - struct tls_device *dev; |
|---|
| 609 | | - int err; |
|---|
| 610 | | - |
|---|
| 611 | | - err = ctx->hash(sk); |
|---|
| 612 | | - mutex_lock(&device_mutex); |
|---|
| 613 | | - list_for_each_entry(dev, &device_list, dev_list) { |
|---|
| 614 | | - if (dev->hash) |
|---|
| 615 | | - err |= dev->hash(dev, sk); |
|---|
| 616 | | - } |
|---|
| 617 | | - mutex_unlock(&device_mutex); |
|---|
| 618 | | - |
|---|
| 619 | | - if (err) |
|---|
| 620 | | - tls_hw_unhash(sk); |
|---|
| 621 | | - return err; |
|---|
| 622 | 704 | } |
|---|
| 623 | 705 | |
|---|
| 624 | 706 | static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], |
|---|
| 625 | | - struct proto *base) |
|---|
| 707 | + const struct proto *base) |
|---|
| 626 | 708 | { |
|---|
| 627 | 709 | prot[TLS_BASE][TLS_BASE] = *base; |
|---|
| 628 | 710 | prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt; |
|---|
| .. | .. |
|---|
| 634 | 716 | prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage; |
|---|
| 635 | 717 | |
|---|
| 636 | 718 | prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; |
|---|
| 637 | | - prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg; |
|---|
| 638 | | - prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close; |
|---|
| 719 | + prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg; |
|---|
| 720 | + prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read; |
|---|
| 721 | + prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close; |
|---|
| 639 | 722 | |
|---|
| 640 | 723 | prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE]; |
|---|
| 641 | | - prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg; |
|---|
| 642 | | - prot[TLS_SW][TLS_SW].close = tls_sk_proto_close; |
|---|
| 724 | + prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg; |
|---|
| 725 | + prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read; |
|---|
| 726 | + prot[TLS_SW][TLS_SW].close = tls_sk_proto_close; |
|---|
| 643 | 727 | |
|---|
| 644 | 728 | #ifdef CONFIG_TLS_DEVICE |
|---|
| 645 | 729 | prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; |
|---|
| .. | .. |
|---|
| 656 | 740 | |
|---|
| 657 | 741 | prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW]; |
|---|
| 658 | 742 | #endif |
|---|
| 659 | | - |
|---|
| 743 | +#ifdef CONFIG_TLS_TOE |
|---|
| 660 | 744 | prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; |
|---|
| 661 | | - prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash; |
|---|
| 662 | | - prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash; |
|---|
| 663 | | - prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close; |
|---|
| 745 | + prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash; |
|---|
| 746 | + prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash; |
|---|
| 747 | +#endif |
|---|
| 664 | 748 | } |
|---|
| 665 | 749 | |
|---|
| 666 | 750 | static int tls_init(struct sock *sk) |
|---|
| 667 | 751 | { |
|---|
| 668 | | - int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; |
|---|
| 669 | 752 | struct tls_context *ctx; |
|---|
| 670 | 753 | int rc = 0; |
|---|
| 671 | 754 | |
|---|
| 672 | | - if (tls_hw_prot(sk)) |
|---|
| 673 | | - goto out; |
|---|
| 755 | + tls_build_proto(sk); |
|---|
| 756 | + |
|---|
| 757 | +#ifdef CONFIG_TLS_TOE |
|---|
| 758 | + if (tls_toe_bypass(sk)) |
|---|
| 759 | + return 0; |
|---|
| 760 | +#endif |
|---|
| 674 | 761 | |
|---|
| 675 | 762 | /* The TLS ulp is currently supported only for TCP sockets |
|---|
| 676 | 763 | * in ESTABLISHED state. |
|---|
| .. | .. |
|---|
| 679 | 766 | * share the ulp context. |
|---|
| 680 | 767 | */ |
|---|
| 681 | 768 | if (sk->sk_state != TCP_ESTABLISHED) |
|---|
| 682 | | - return -ENOTSUPP; |
|---|
| 769 | + return -ENOTCONN; |
|---|
| 683 | 770 | |
|---|
| 684 | 771 | /* allocate tls context */ |
|---|
| 685 | | - ctx = create_ctx(sk); |
|---|
| 772 | + write_lock_bh(&sk->sk_callback_lock); |
|---|
| 773 | + ctx = tls_ctx_create(sk); |
|---|
| 686 | 774 | if (!ctx) { |
|---|
| 687 | 775 | rc = -ENOMEM; |
|---|
| 688 | 776 | goto out; |
|---|
| 689 | | - } |
|---|
| 690 | | - |
|---|
| 691 | | - /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */ |
|---|
| 692 | | - if (ip_ver == TLSV6 && |
|---|
| 693 | | - unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { |
|---|
| 694 | | - mutex_lock(&tcpv6_prot_mutex); |
|---|
| 695 | | - if (likely(sk->sk_prot != saved_tcpv6_prot)) { |
|---|
| 696 | | - build_protos(tls_prots[TLSV6], sk->sk_prot); |
|---|
| 697 | | - smp_store_release(&saved_tcpv6_prot, sk->sk_prot); |
|---|
| 698 | | - } |
|---|
| 699 | | - mutex_unlock(&tcpv6_prot_mutex); |
|---|
| 700 | 777 | } |
|---|
| 701 | 778 | |
|---|
| 702 | 779 | ctx->tx_conf = TLS_BASE; |
|---|
| 703 | 780 | ctx->rx_conf = TLS_BASE; |
|---|
| 704 | 781 | update_sk_prot(sk, ctx); |
|---|
| 705 | 782 | out: |
|---|
| 783 | + write_unlock_bh(&sk->sk_callback_lock); |
|---|
| 706 | 784 | return rc; |
|---|
| 707 | 785 | } |
|---|
| 708 | 786 | |
|---|
| 709 | | -void tls_register_device(struct tls_device *device) |
|---|
| 787 | +static void tls_update(struct sock *sk, struct proto *p, |
|---|
| 788 | + void (*write_space)(struct sock *sk)) |
|---|
| 710 | 789 | { |
|---|
| 711 | | - mutex_lock(&device_mutex); |
|---|
| 712 | | - list_add_tail(&device->dev_list, &device_list); |
|---|
| 713 | | - mutex_unlock(&device_mutex); |
|---|
| 714 | | -} |
|---|
| 715 | | -EXPORT_SYMBOL(tls_register_device); |
|---|
| 790 | + struct tls_context *ctx; |
|---|
| 716 | 791 | |
|---|
| 717 | | -void tls_unregister_device(struct tls_device *device) |
|---|
| 718 | | -{ |
|---|
| 719 | | - mutex_lock(&device_mutex); |
|---|
| 720 | | - list_del(&device->dev_list); |
|---|
| 721 | | - mutex_unlock(&device_mutex); |
|---|
| 792 | + ctx = tls_get_ctx(sk); |
|---|
| 793 | + if (likely(ctx)) { |
|---|
| 794 | + ctx->sk_write_space = write_space; |
|---|
| 795 | + ctx->sk_proto = p; |
|---|
| 796 | + } else { |
|---|
| 797 | + /* Pairs with lockless read in sk_clone_lock(). */ |
|---|
| 798 | + WRITE_ONCE(sk->sk_prot, p); |
|---|
| 799 | + sk->sk_write_space = write_space; |
|---|
| 800 | + } |
|---|
| 722 | 801 | } |
|---|
| 723 | | -EXPORT_SYMBOL(tls_unregister_device); |
|---|
| 802 | + |
|---|
| 803 | +static int tls_get_info(const struct sock *sk, struct sk_buff *skb) |
|---|
| 804 | +{ |
|---|
| 805 | + u16 version, cipher_type; |
|---|
| 806 | + struct tls_context *ctx; |
|---|
| 807 | + struct nlattr *start; |
|---|
| 808 | + int err; |
|---|
| 809 | + |
|---|
| 810 | + start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS); |
|---|
| 811 | + if (!start) |
|---|
| 812 | + return -EMSGSIZE; |
|---|
| 813 | + |
|---|
| 814 | + rcu_read_lock(); |
|---|
| 815 | + ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data); |
|---|
| 816 | + if (!ctx) { |
|---|
| 817 | + err = 0; |
|---|
| 818 | + goto nla_failure; |
|---|
| 819 | + } |
|---|
| 820 | + version = ctx->prot_info.version; |
|---|
| 821 | + if (version) { |
|---|
| 822 | + err = nla_put_u16(skb, TLS_INFO_VERSION, version); |
|---|
| 823 | + if (err) |
|---|
| 824 | + goto nla_failure; |
|---|
| 825 | + } |
|---|
| 826 | + cipher_type = ctx->prot_info.cipher_type; |
|---|
| 827 | + if (cipher_type) { |
|---|
| 828 | + err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type); |
|---|
| 829 | + if (err) |
|---|
| 830 | + goto nla_failure; |
|---|
| 831 | + } |
|---|
| 832 | + err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true)); |
|---|
| 833 | + if (err) |
|---|
| 834 | + goto nla_failure; |
|---|
| 835 | + |
|---|
| 836 | + err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false)); |
|---|
| 837 | + if (err) |
|---|
| 838 | + goto nla_failure; |
|---|
| 839 | + |
|---|
| 840 | + rcu_read_unlock(); |
|---|
| 841 | + nla_nest_end(skb, start); |
|---|
| 842 | + return 0; |
|---|
| 843 | + |
|---|
| 844 | +nla_failure: |
|---|
| 845 | + rcu_read_unlock(); |
|---|
| 846 | + nla_nest_cancel(skb, start); |
|---|
| 847 | + return err; |
|---|
| 848 | +} |
|---|
| 849 | + |
|---|
| 850 | +static size_t tls_get_info_size(const struct sock *sk) |
|---|
| 851 | +{ |
|---|
| 852 | + size_t size = 0; |
|---|
| 853 | + |
|---|
| 854 | + size += nla_total_size(0) + /* INET_ULP_INFO_TLS */ |
|---|
| 855 | + nla_total_size(sizeof(u16)) + /* TLS_INFO_VERSION */ |
|---|
| 856 | + nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */ |
|---|
| 857 | + nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */ |
|---|
| 858 | + nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ |
|---|
| 859 | + 0; |
|---|
| 860 | + |
|---|
| 861 | + return size; |
|---|
| 862 | +} |
|---|
| 863 | + |
|---|
| 864 | +static int __net_init tls_init_net(struct net *net) |
|---|
| 865 | +{ |
|---|
| 866 | + int err; |
|---|
| 867 | + |
|---|
| 868 | + net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib); |
|---|
| 869 | + if (!net->mib.tls_statistics) |
|---|
| 870 | + return -ENOMEM; |
|---|
| 871 | + |
|---|
| 872 | + err = tls_proc_init(net); |
|---|
| 873 | + if (err) |
|---|
| 874 | + goto err_free_stats; |
|---|
| 875 | + |
|---|
| 876 | + return 0; |
|---|
| 877 | +err_free_stats: |
|---|
| 878 | + free_percpu(net->mib.tls_statistics); |
|---|
| 879 | + return err; |
|---|
| 880 | +} |
|---|
| 881 | + |
|---|
| 882 | +static void __net_exit tls_exit_net(struct net *net) |
|---|
| 883 | +{ |
|---|
| 884 | + tls_proc_fini(net); |
|---|
| 885 | + free_percpu(net->mib.tls_statistics); |
|---|
| 886 | +} |
|---|
| 887 | + |
|---|
| 888 | +static struct pernet_operations tls_proc_ops = { |
|---|
| 889 | + .init = tls_init_net, |
|---|
| 890 | + .exit = tls_exit_net, |
|---|
| 891 | +}; |
|---|
| 724 | 892 | |
|---|
| 725 | 893 | static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = { |
|---|
| 726 | 894 | .name = "tls", |
|---|
| 727 | | - .uid = TCP_ULP_TLS, |
|---|
| 728 | | - .user_visible = true, |
|---|
| 729 | 895 | .owner = THIS_MODULE, |
|---|
| 730 | 896 | .init = tls_init, |
|---|
| 897 | + .update = tls_update, |
|---|
| 898 | + .get_info = tls_get_info, |
|---|
| 899 | + .get_info_size = tls_get_info_size, |
|---|
| 731 | 900 | }; |
|---|
| 732 | 901 | |
|---|
| 733 | 902 | static int __init tls_register(void) |
|---|
| 734 | 903 | { |
|---|
| 735 | | - build_protos(tls_prots[TLSV4], &tcp_prot); |
|---|
| 904 | + int err; |
|---|
| 736 | 905 | |
|---|
| 737 | | - tls_sw_proto_ops = inet_stream_ops; |
|---|
| 738 | | - tls_sw_proto_ops.poll = tls_sw_poll; |
|---|
| 739 | | - tls_sw_proto_ops.splice_read = tls_sw_splice_read; |
|---|
| 906 | + err = register_pernet_subsys(&tls_proc_ops); |
|---|
| 907 | + if (err) |
|---|
| 908 | + return err; |
|---|
| 740 | 909 | |
|---|
| 741 | | -#ifdef CONFIG_TLS_DEVICE |
|---|
| 742 | | - tls_device_init(); |
|---|
| 743 | | -#endif |
|---|
| 910 | + err = tls_device_init(); |
|---|
| 911 | + if (err) { |
|---|
| 912 | + unregister_pernet_subsys(&tls_proc_ops); |
|---|
| 913 | + return err; |
|---|
| 914 | + } |
|---|
| 915 | + |
|---|
| 744 | 916 | tcp_register_ulp(&tcp_tls_ulp_ops); |
|---|
| 745 | 917 | |
|---|
| 746 | 918 | return 0; |
|---|
| .. | .. |
|---|
| 749 | 921 | static void __exit tls_unregister(void) |
|---|
| 750 | 922 | { |
|---|
| 751 | 923 | tcp_unregister_ulp(&tcp_tls_ulp_ops); |
|---|
| 752 | | -#ifdef CONFIG_TLS_DEVICE |
|---|
| 753 | 924 | tls_device_cleanup(); |
|---|
| 754 | | -#endif |
|---|
| 925 | + unregister_pernet_subsys(&tls_proc_ops); |
|---|
| 755 | 926 | } |
|---|
| 756 | 927 | |
|---|
| 757 | 928 | module_init(tls_register); |
|---|