| .. | .. | .. |
|---|---|---|
| 312 | 312 | static bool tcp_fastopen_queue_check(struct sock *sk) |
| 313 | 313 | { |
| 314 | 314 | struct fastopen_queue *fastopenq; |
| | 315 | + int max_qlen; |
| 315 | 316 | |
| 316 | 317 | /* Make sure the listener has enabled fastopen, and we don't |
| 317 | 318 | * exceed the max # of pending TFO requests allowed before trying |
| .. | .. | .. |
| 324 | 325 | * temporarily vs a server not supporting Fast Open at all. |
| 325 | 326 | */ |
| 326 | 327 | fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq; |
| 327 | | - if (fastopenq->max_qlen == 0) |
| | 328 | + max_qlen = READ_ONCE(fastopenq->max_qlen); |
| | 329 | + if (max_qlen == 0) |
| 328 | 330 | return false; |
| 329 | 331 | |
| 330 | | - if (fastopenq->qlen >= fastopenq->max_qlen) { |
| | 332 | + if (fastopenq->qlen >= max_qlen) { |
| 331 | 333 | struct request_sock *req1; |
| 332 | 334 | spin_lock(&fastopenq->lock); |
| 333 | 335 | req1 = fastopenq->rskq_rst_head; |
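The hunk loads fastopenq->max_qlen once through READ_ONCE() into a local, so the zero check and the qlen comparison both see the same snapshot even if another CPU updates the limit concurrently. Below is a minimal userspace sketch of that load-once pattern, assuming a simplified READ_ONCE() built from a volatile access; the struct and function names are hypothetical and not the kernel's.

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single,
 * non-torn load that the compiler cannot re-read or reorder away.
 */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

/* Hypothetical queue with a limit that another thread may change. */
struct queue {
	int qlen;      /* current number of pending requests */
	int max_qlen;  /* limit, possibly updated concurrently */
};

/* Read max_qlen exactly once into a local so the "disabled" check and
 * the capacity check use the same value, mirroring the patched code.
 */
static int queue_has_room(struct queue *q)
{
	int max_qlen = READ_ONCE(q->max_qlen);

	if (max_qlen == 0)          /* limit of 0 means the feature is off */
		return 0;
	return q->qlen < max_qlen;  /* compare against the same snapshot */
}

int main(void)
{
	struct queue q = { .qlen = 3, .max_qlen = 16 };

	printf("has room: %d\n", queue_has_room(&q));
	return 0;
}
```

Without the local snapshot, two separate reads of max_qlen could observe different values around a concurrent update, which is the kind of data race the annotation is meant to make explicit and suppress.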