| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: ISC |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (c) 2005-2011 Atheros Communications Inc. |
|---|
| 3 | 4 | * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. |
|---|
| 4 | 5 | * Copyright (c) 2018 The Linux Foundation. All rights reserved. |
|---|
| 5 | | - * |
|---|
| 6 | | - * Permission to use, copy, modify, and/or distribute this software for any |
|---|
| 7 | | - * purpose with or without fee is hereby granted, provided that the above |
|---|
| 8 | | - * copyright notice and this permission notice appear in all copies. |
|---|
| 9 | | - * |
|---|
| 10 | | - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
|---|
| 11 | | - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
|---|
| 12 | | - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
|---|
| 13 | | - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
|---|
| 14 | | - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
|---|
| 15 | | - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
|---|
| 16 | | - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
|---|
| 17 | 6 | */ |
|---|
| 18 | 7 | |
|---|
| 19 | 8 | #include "hif.h" |
|---|
| .. | .. |
|---|
| 228 | 217 | } |
|---|
| 229 | 218 | |
|---|
| 230 | 219 | static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, |
|---|
| 231 | | - u32 ce_ctrl_addr, |
|---|
| 232 | | - unsigned int addr) |
|---|
| 220 | + u32 ce_id, |
|---|
| 221 | + u64 addr) |
|---|
| 233 | 222 | { |
|---|
| 223 | + struct ath10k_ce *ce = ath10k_ce_priv(ar); |
|---|
| 224 | + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; |
|---|
| 225 | + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 226 | + u32 addr_lo = lower_32_bits(addr); |
|---|
| 227 | + |
|---|
| 234 | 228 | ath10k_ce_write32(ar, ce_ctrl_addr + |
|---|
| 235 | | - ar->hw_ce_regs->sr_base_addr, addr); |
|---|
| 229 | + ar->hw_ce_regs->sr_base_addr_lo, addr_lo); |
|---|
| 230 | + |
|---|
| 231 | + if (ce_state->ops->ce_set_src_ring_base_addr_hi) { |
|---|
| 232 | + ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, |
|---|
| 233 | + addr); |
|---|
| 234 | + } |
|---|
| 235 | +} |
|---|
| 236 | + |
|---|
| 237 | +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar, |
|---|
| 238 | + u32 ce_ctrl_addr, |
|---|
| 239 | + u64 addr) |
|---|
| 240 | +{ |
|---|
| 241 | + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; |
|---|
| 242 | + |
|---|
| 243 | + ath10k_ce_write32(ar, ce_ctrl_addr + |
|---|
| 244 | + ar->hw_ce_regs->sr_base_addr_hi, addr_hi); |
|---|
| 236 | 245 | } |
|---|
| 237 | 246 | |
|---|
| 238 | 247 | static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, |
|---|
| .. | .. |
|---|
| 313 | 322 | } |
|---|
| 314 | 323 | |
|---|
| 315 | 324 | static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, |
|---|
| 316 | | - u32 ce_ctrl_addr, |
|---|
| 317 | | - u32 addr) |
|---|
| 325 | + u32 ce_id, |
|---|
| 326 | + u64 addr) |
|---|
| 318 | 327 | { |
|---|
| 328 | + struct ath10k_ce *ce = ath10k_ce_priv(ar); |
|---|
| 329 | + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; |
|---|
| 330 | + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 331 | + u32 addr_lo = lower_32_bits(addr); |
|---|
| 332 | + |
|---|
| 319 | 333 | ath10k_ce_write32(ar, ce_ctrl_addr + |
|---|
| 320 | | - ar->hw_ce_regs->dr_base_addr, addr); |
|---|
| 334 | + ar->hw_ce_regs->dr_base_addr_lo, addr_lo); |
|---|
| 335 | + |
|---|
| 336 | + if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { |
|---|
| 337 | + ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, |
|---|
| 338 | + addr); |
|---|
| 339 | + } |
|---|
| 340 | +} |
|---|
| 341 | + |
|---|
| 342 | +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar, |
|---|
| 343 | + u32 ce_ctrl_addr, |
|---|
| 344 | + u64 addr) |
|---|
| 345 | +{ |
|---|
| 346 | + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; |
|---|
| 347 | + u32 reg_value; |
|---|
| 348 | + |
|---|
| 349 | + reg_value = ath10k_ce_read32(ar, ce_ctrl_addr + |
|---|
| 350 | + ar->hw_ce_regs->dr_base_addr_hi); |
|---|
| 351 | + reg_value &= ~CE_DESC_ADDR_HI_MASK; |
|---|
| 352 | + reg_value |= addr_hi; |
|---|
| 353 | + ath10k_ce_write32(ar, ce_ctrl_addr + |
|---|
| 354 | + ar->hw_ce_regs->dr_base_addr_hi, reg_value); |
|---|
| 321 | 355 | } |
|---|
| 322 | 356 | |
|---|
| 323 | 357 | static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, |
|---|
| .. | .. |
|---|
| 557 | 591 | |
|---|
| 558 | 592 | addr = (__le32 *)&sdesc.addr; |
|---|
| 559 | 593 | |
|---|
| 560 | | - flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; |
|---|
| 594 | + flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK; |
|---|
| 561 | 595 | addr[0] = __cpu_to_le32(buffer); |
|---|
| 562 | 596 | addr[1] = __cpu_to_le32(flags); |
|---|
| 563 | 597 | if (flags & CE_SEND_FLAG_GATHER) |
|---|
| .. | .. |
|---|
| 731 | 765 | return -ENOSPC; |
|---|
| 732 | 766 | |
|---|
| 733 | 767 | desc->addr = __cpu_to_le64(paddr); |
|---|
| 734 | | - desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK); |
|---|
| 768 | + desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); |
|---|
| 735 | 769 | |
|---|
| 736 | 770 | desc->nbytes = 0; |
|---|
| 737 | 771 | |
|---|
| .. | .. |
|---|
| 1032 | 1066 | * Guts of ath10k_ce_completed_send_next. |
|---|
| 1033 | 1067 | * The caller takes responsibility for any necessary locking. |
|---|
| 1034 | 1068 | */ |
|---|
| 1035 | | -int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, |
|---|
| 1036 | | - void **per_transfer_contextp) |
|---|
| 1069 | +static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, |
|---|
| 1070 | + void **per_transfer_contextp) |
|---|
| 1037 | 1071 | { |
|---|
| 1038 | 1072 | struct ath10k_ce_ring *src_ring = ce_state->src_ring; |
|---|
| 1039 | 1073 | u32 ctrl_addr = ce_state->ctrl_addr; |
|---|
| .. | .. |
|---|
| 1083 | 1117 | src_ring->sw_index = sw_index; |
|---|
| 1084 | 1118 | |
|---|
| 1085 | 1119 | return 0; |
|---|
| 1120 | +} |
|---|
| 1121 | + |
|---|
| 1122 | +static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state, |
|---|
| 1123 | + void **per_transfer_contextp) |
|---|
| 1124 | +{ |
|---|
| 1125 | + struct ath10k_ce_ring *src_ring = ce_state->src_ring; |
|---|
| 1126 | + u32 ctrl_addr = ce_state->ctrl_addr; |
|---|
| 1127 | + struct ath10k *ar = ce_state->ar; |
|---|
| 1128 | + unsigned int nentries_mask = src_ring->nentries_mask; |
|---|
| 1129 | + unsigned int sw_index = src_ring->sw_index; |
|---|
| 1130 | + unsigned int read_index; |
|---|
| 1131 | + struct ce_desc_64 *desc; |
|---|
| 1132 | + |
|---|
| 1133 | + if (src_ring->hw_index == sw_index) { |
|---|
| 1134 | + /* |
|---|
| 1135 | + * The SW completion index has caught up with the cached |
|---|
| 1136 | + * version of the HW completion index. |
|---|
| 1137 | + * Update the cached HW completion index to see whether |
|---|
| 1138 | + * the SW has really caught up to the HW, or if the cached |
|---|
| 1139 | + * value of the HW index has become stale. |
|---|
| 1140 | + */ |
|---|
| 1141 | + |
|---|
| 1142 | + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); |
|---|
| 1143 | + if (read_index == 0xffffffff) |
|---|
| 1144 | + return -ENODEV; |
|---|
| 1145 | + |
|---|
| 1146 | + read_index &= nentries_mask; |
|---|
| 1147 | + src_ring->hw_index = read_index; |
|---|
| 1148 | + } |
|---|
| 1149 | + |
|---|
| 1150 | + if (ar->hw_params.rri_on_ddr) |
|---|
| 1151 | + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); |
|---|
| 1152 | + else |
|---|
| 1153 | + read_index = src_ring->hw_index; |
|---|
| 1154 | + |
|---|
| 1155 | + if (read_index == sw_index) |
|---|
| 1156 | + return -EIO; |
|---|
| 1157 | + |
|---|
| 1158 | + if (per_transfer_contextp) |
|---|
| 1159 | + *per_transfer_contextp = |
|---|
| 1160 | + src_ring->per_transfer_context[sw_index]; |
|---|
| 1161 | + |
|---|
| 1162 | + /* sanity */ |
|---|
| 1163 | + src_ring->per_transfer_context[sw_index] = NULL; |
|---|
| 1164 | + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, |
|---|
| 1165 | + sw_index); |
|---|
| 1166 | + desc->nbytes = 0; |
|---|
| 1167 | + |
|---|
| 1168 | + /* Update sw_index */ |
|---|
| 1169 | + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); |
|---|
| 1170 | + src_ring->sw_index = sw_index; |
|---|
| 1171 | + |
|---|
| 1172 | + return 0; |
|---|
| 1173 | +} |
|---|
| 1174 | + |
|---|
| 1175 | +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, |
|---|
| 1176 | + void **per_transfer_contextp) |
|---|
| 1177 | +{ |
|---|
| 1178 | + return ce_state->ops->ce_completed_send_next_nolock(ce_state, |
|---|
| 1179 | + per_transfer_contextp); |
|---|
| 1086 | 1180 | } |
|---|
| 1087 | 1181 | EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); |
|---|
| 1088 | 1182 | |
|---|
| .. | .. |
|---|
| 1205 | 1299 | struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; |
|---|
| 1206 | 1300 | u32 ctrl_addr = ce_state->ctrl_addr; |
|---|
| 1207 | 1301 | |
|---|
| 1208 | | - spin_lock_bh(&ce->ce_lock); |
|---|
| 1209 | | - |
|---|
| 1210 | | - /* Clear the copy-complete interrupts that will be handled here. */ |
|---|
| 1302 | + /* |
|---|
| 1303 | + * Clear before handling |
|---|
| 1304 | + * |
|---|
| 1305 | + * Misc CE interrupts are not being handled, but still need |
|---|
| 1306 | + * to be cleared. |
|---|
| 1307 | + * |
|---|
| 1308 | + * NOTE: When the last copy engine interrupt is cleared the |
|---|
| 1309 | + * hardware will go to sleep. Once this happens any access to |
|---|
| 1310 | + * the CE registers can cause a hardware fault. |
|---|
| 1311 | + */ |
|---|
| 1211 | 1312 | ath10k_ce_engine_int_status_clear(ar, ctrl_addr, |
|---|
| 1212 | | - wm_regs->cc_mask); |
|---|
| 1213 | | - |
|---|
| 1214 | | - spin_unlock_bh(&ce->ce_lock); |
|---|
| 1313 | + wm_regs->cc_mask | wm_regs->wm_mask); |
|---|
| 1215 | 1314 | |
|---|
| 1216 | 1315 | if (ce_state->recv_cb) |
|---|
| 1217 | 1316 | ce_state->recv_cb(ce_state); |
|---|
| 1218 | 1317 | |
|---|
| 1219 | 1318 | if (ce_state->send_cb) |
|---|
| 1220 | 1319 | ce_state->send_cb(ce_state); |
|---|
| 1221 | | - |
|---|
| 1222 | | - spin_lock_bh(&ce->ce_lock); |
|---|
| 1223 | | - |
|---|
| 1224 | | - /* |
|---|
| 1225 | | - * Misc CE interrupts are not being handled, but still need |
|---|
| 1226 | | - * to be cleared. |
|---|
| 1227 | | - */ |
|---|
| 1228 | | - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask); |
|---|
| 1229 | | - |
|---|
| 1230 | | - spin_unlock_bh(&ce->ce_lock); |
|---|
| 1231 | 1320 | } |
|---|
| 1232 | 1321 | EXPORT_SYMBOL(ath10k_ce_per_engine_service); |
|---|
| 1233 | 1322 | |
|---|
| .. | .. |
|---|
| 1278 | 1367 | ath10k_ce_watermark_intr_disable(ar, ctrl_addr); |
|---|
| 1279 | 1368 | } |
|---|
| 1280 | 1369 | |
|---|
| 1281 | | -int ath10k_ce_disable_interrupts(struct ath10k *ar) |
|---|
| 1370 | +void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id) |
|---|
| 1371 | +{ |
|---|
| 1372 | + struct ath10k_ce *ce = ath10k_ce_priv(ar); |
|---|
| 1373 | + struct ath10k_ce_pipe *ce_state; |
|---|
| 1374 | + u32 ctrl_addr; |
|---|
| 1375 | + |
|---|
| 1376 | + ce_state = &ce->ce_states[ce_id]; |
|---|
| 1377 | + if (ce_state->attr_flags & CE_ATTR_POLL) |
|---|
| 1378 | + return; |
|---|
| 1379 | + |
|---|
| 1380 | + ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 1381 | + |
|---|
| 1382 | + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); |
|---|
| 1383 | + ath10k_ce_error_intr_disable(ar, ctrl_addr); |
|---|
| 1384 | + ath10k_ce_watermark_intr_disable(ar, ctrl_addr); |
|---|
| 1385 | +} |
|---|
| 1386 | +EXPORT_SYMBOL(ath10k_ce_disable_interrupt); |
|---|
| 1387 | + |
|---|
| 1388 | +void ath10k_ce_disable_interrupts(struct ath10k *ar) |
|---|
| 1282 | 1389 | { |
|---|
| 1283 | 1390 | int ce_id; |
|---|
| 1284 | 1391 | |
|---|
| 1285 | | - for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { |
|---|
| 1286 | | - u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 1287 | | - |
|---|
| 1288 | | - ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); |
|---|
| 1289 | | - ath10k_ce_error_intr_disable(ar, ctrl_addr); |
|---|
| 1290 | | - ath10k_ce_watermark_intr_disable(ar, ctrl_addr); |
|---|
| 1291 | | - } |
|---|
| 1292 | | - |
|---|
| 1293 | | - return 0; |
|---|
| 1392 | + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) |
|---|
| 1393 | + ath10k_ce_disable_interrupt(ar, ce_id); |
|---|
| 1294 | 1394 | } |
|---|
| 1295 | 1395 | EXPORT_SYMBOL(ath10k_ce_disable_interrupts); |
|---|
| 1296 | 1396 | |
|---|
| 1297 | | -void ath10k_ce_enable_interrupts(struct ath10k *ar) |
|---|
| 1397 | +void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id) |
|---|
| 1298 | 1398 | { |
|---|
| 1299 | 1399 | struct ath10k_ce *ce = ath10k_ce_priv(ar); |
|---|
| 1300 | | - int ce_id; |
|---|
| 1301 | 1400 | struct ath10k_ce_pipe *ce_state; |
|---|
| 1302 | 1401 | |
|---|
| 1303 | | - /* Skip the last copy engine, CE7 the diagnostic window, as that |
|---|
| 1304 | | - * uses polling and isn't initialized for interrupts. |
|---|
| 1402 | + ce_state = &ce->ce_states[ce_id]; |
|---|
| 1403 | + if (ce_state->attr_flags & CE_ATTR_POLL) |
|---|
| 1404 | + return; |
|---|
| 1405 | + |
|---|
| 1406 | + ath10k_ce_per_engine_handler_adjust(ce_state); |
|---|
| 1407 | +} |
|---|
| 1408 | +EXPORT_SYMBOL(ath10k_ce_enable_interrupt); |
|---|
| 1409 | + |
|---|
| 1410 | +void ath10k_ce_enable_interrupts(struct ath10k *ar) |
|---|
| 1411 | +{ |
|---|
| 1412 | + int ce_id; |
|---|
| 1413 | + |
|---|
| 1414 | + /* Enable interrupts for copy engines that |
|---|
| 1415 | + * are not using polling mode. |
|---|
| 1305 | 1416 | */ |
|---|
| 1306 | | - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { |
|---|
| 1307 | | - ce_state = &ce->ce_states[ce_id]; |
|---|
| 1308 | | - ath10k_ce_per_engine_handler_adjust(ce_state); |
|---|
| 1309 | | - } |
|---|
| 1417 | + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) |
|---|
| 1418 | + ath10k_ce_enable_interrupt(ar, ce_id); |
|---|
| 1310 | 1419 | } |
|---|
| 1311 | 1420 | EXPORT_SYMBOL(ath10k_ce_enable_interrupts); |
|---|
| 1312 | 1421 | |
|---|
| .. | .. |
|---|
| 1336 | 1445 | ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); |
|---|
| 1337 | 1446 | src_ring->write_index &= src_ring->nentries_mask; |
|---|
| 1338 | 1447 | |
|---|
| 1339 | | - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, |
|---|
| 1448 | + ath10k_ce_src_ring_base_addr_set(ar, ce_id, |
|---|
| 1340 | 1449 | src_ring->base_addr_ce_space); |
|---|
| 1341 | 1450 | ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); |
|---|
| 1342 | 1451 | ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); |
|---|
| .. | .. |
|---|
| 1375 | 1484 | ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); |
|---|
| 1376 | 1485 | dest_ring->write_index &= dest_ring->nentries_mask; |
|---|
| 1377 | 1486 | |
|---|
| 1378 | | - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, |
|---|
| 1487 | + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, |
|---|
| 1379 | 1488 | dest_ring->base_addr_ce_space); |
|---|
| 1380 | 1489 | ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); |
|---|
| 1381 | 1490 | ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); |
|---|
| .. | .. |
|---|
| 1416 | 1525 | |
|---|
| 1417 | 1526 | nentries = roundup_pow_of_two(nentries); |
|---|
| 1418 | 1527 | |
|---|
| 1419 | | - src_ring = kzalloc(sizeof(*src_ring) + |
|---|
| 1420 | | - (nentries * |
|---|
| 1421 | | - sizeof(*src_ring->per_transfer_context)), |
|---|
| 1422 | | - GFP_KERNEL); |
|---|
| 1528 | + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, |
|---|
| 1529 | + nentries), GFP_KERNEL); |
|---|
| 1423 | 1530 | if (src_ring == NULL) |
|---|
| 1424 | 1531 | return ERR_PTR(-ENOMEM); |
|---|
| 1425 | 1532 | |
|---|
| .. | .. |
|---|
| 1476 | 1583 | |
|---|
| 1477 | 1584 | nentries = roundup_pow_of_two(nentries); |
|---|
| 1478 | 1585 | |
|---|
| 1479 | | - src_ring = kzalloc(sizeof(*src_ring) + |
|---|
| 1480 | | - (nentries * |
|---|
| 1481 | | - sizeof(*src_ring->per_transfer_context)), |
|---|
| 1482 | | - GFP_KERNEL); |
|---|
| 1586 | + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, |
|---|
| 1587 | + nentries), GFP_KERNEL); |
|---|
| 1483 | 1588 | if (!src_ring) |
|---|
| 1484 | 1589 | return ERR_PTR(-ENOMEM); |
|---|
| 1485 | 1590 | |
|---|
| .. | .. |
|---|
| 1534 | 1639 | |
|---|
| 1535 | 1640 | nentries = roundup_pow_of_two(attr->dest_nentries); |
|---|
| 1536 | 1641 | |
|---|
| 1537 | | - dest_ring = kzalloc(sizeof(*dest_ring) + |
|---|
| 1538 | | - (nentries * |
|---|
| 1539 | | - sizeof(*dest_ring->per_transfer_context)), |
|---|
| 1540 | | - GFP_KERNEL); |
|---|
| 1642 | + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, |
|---|
| 1643 | + nentries), GFP_KERNEL); |
|---|
| 1541 | 1644 | if (dest_ring == NULL) |
|---|
| 1542 | 1645 | return ERR_PTR(-ENOMEM); |
|---|
| 1543 | 1646 | |
|---|
| .. | .. |
|---|
| 1549 | 1652 | * coherent DMA are unsupported |
|---|
| 1550 | 1653 | */ |
|---|
| 1551 | 1654 | dest_ring->base_addr_owner_space_unaligned = |
|---|
| 1552 | | - dma_zalloc_coherent(ar->dev, |
|---|
| 1553 | | - (nentries * sizeof(struct ce_desc) + |
|---|
| 1554 | | - CE_DESC_RING_ALIGN), |
|---|
| 1555 | | - &base_addr, GFP_KERNEL); |
|---|
| 1655 | + dma_alloc_coherent(ar->dev, |
|---|
| 1656 | + (nentries * sizeof(struct ce_desc) + |
|---|
| 1657 | + CE_DESC_RING_ALIGN), |
|---|
| 1658 | + &base_addr, GFP_KERNEL); |
|---|
| 1556 | 1659 | if (!dest_ring->base_addr_owner_space_unaligned) { |
|---|
| 1557 | 1660 | kfree(dest_ring); |
|---|
| 1558 | 1661 | return ERR_PTR(-ENOMEM); |
|---|
| .. | .. |
|---|
| 1580 | 1683 | |
|---|
| 1581 | 1684 | nentries = roundup_pow_of_two(attr->dest_nentries); |
|---|
| 1582 | 1685 | |
|---|
| 1583 | | - dest_ring = kzalloc(sizeof(*dest_ring) + |
|---|
| 1584 | | - (nentries * |
|---|
| 1585 | | - sizeof(*dest_ring->per_transfer_context)), |
|---|
| 1586 | | - GFP_KERNEL); |
|---|
| 1686 | + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, |
|---|
| 1687 | + nentries), GFP_KERNEL); |
|---|
| 1587 | 1688 | if (!dest_ring) |
|---|
| 1588 | 1689 | return ERR_PTR(-ENOMEM); |
|---|
| 1589 | 1690 | |
|---|
| .. | .. |
|---|
| 1608 | 1709 | /* Correctly initialize memory to 0 to prevent garbage |
|---|
| 1609 | 1710 | * data crashing system when download firmware |
|---|
| 1610 | 1711 | */ |
|---|
| 1611 | | - memset(dest_ring->base_addr_owner_space_unaligned, 0, |
|---|
| 1612 | | - nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN); |
|---|
| 1613 | | - |
|---|
| 1614 | 1712 | dest_ring->base_addr_owner_space = |
|---|
| 1615 | 1713 | PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, |
|---|
| 1616 | 1714 | CE_DESC_RING_ALIGN); |
|---|
| .. | .. |
|---|
| 1659 | 1757 | { |
|---|
| 1660 | 1758 | u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 1661 | 1759 | |
|---|
| 1662 | | - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); |
|---|
| 1760 | + ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0); |
|---|
| 1663 | 1761 | ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); |
|---|
| 1664 | 1762 | ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); |
|---|
| 1665 | 1763 | ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); |
|---|
| .. | .. |
|---|
| 1669 | 1767 | { |
|---|
| 1670 | 1768 | u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
|---|
| 1671 | 1769 | |
|---|
| 1672 | | - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); |
|---|
| 1770 | + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0); |
|---|
| 1673 | 1771 | ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); |
|---|
| 1674 | 1772 | ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); |
|---|
| 1675 | 1773 | } |
|---|
| .. | .. |
|---|
| 1759 | 1857 | struct ath10k_ce_crash_data ce_data; |
|---|
| 1760 | 1858 | u32 addr, id; |
|---|
| 1761 | 1859 | |
|---|
| 1762 | | - lockdep_assert_held(&ar->data_lock); |
|---|
| 1860 | + lockdep_assert_held(&ar->dump_mutex); |
|---|
| 1763 | 1861 | |
|---|
| 1764 | 1862 | ath10k_err(ar, "Copy Engine register dump:\n"); |
|---|
| 1765 | 1863 | |
|---|
| .. | .. |
|---|
| 1801 | 1899 | .ce_extract_desc_data = ath10k_ce_extract_desc_data, |
|---|
| 1802 | 1900 | .ce_free_pipe = _ath10k_ce_free_pipe, |
|---|
| 1803 | 1901 | .ce_send_nolock = _ath10k_ce_send_nolock, |
|---|
| 1902 | + .ce_set_src_ring_base_addr_hi = NULL, |
|---|
| 1903 | + .ce_set_dest_ring_base_addr_hi = NULL, |
|---|
| 1904 | + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock, |
|---|
| 1804 | 1905 | }; |
|---|
| 1805 | 1906 | |
|---|
| 1806 | 1907 | static const struct ath10k_ce_ops ce_64_ops = { |
|---|
| .. | .. |
|---|
| 1813 | 1914 | .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, |
|---|
| 1814 | 1915 | .ce_free_pipe = _ath10k_ce_free_pipe_64, |
|---|
| 1815 | 1916 | .ce_send_nolock = _ath10k_ce_send_nolock_64, |
|---|
| 1917 | + .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, |
|---|
| 1918 | + .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, |
|---|
| 1919 | + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64, |
|---|
| 1816 | 1920 | }; |
|---|
| 1817 | 1921 | |
|---|
| 1818 | 1922 | static void ath10k_ce_set_ops(struct ath10k *ar, |
|---|
| .. | .. |
|---|
| 1908 | 2012 | lower_32_bits(ce->paddr_rri)); |
|---|
| 1909 | 2013 | ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, |
|---|
| 1910 | 2014 | (upper_32_bits(ce->paddr_rri) & |
|---|
| 1911 | | - CE_DESC_FLAGS_GET_MASK)); |
|---|
| 2015 | + CE_DESC_ADDR_HI_MASK)); |
|---|
| 1912 | 2016 | |
|---|
| 1913 | 2017 | for (i = 0; i < CE_COUNT; i++) { |
|---|
| 1914 | 2018 | ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; |
|---|
| .. | .. |
|---|
| 1917 | 2021 | value |= ar->hw_ce_regs->upd->mask; |
|---|
| 1918 | 2022 | ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); |
|---|
| 1919 | 2023 | } |
|---|
| 1920 | | - |
|---|
| 1921 | | - memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32)); |
|---|
| 1922 | 2024 | } |
|---|
| 1923 | 2025 | EXPORT_SYMBOL(ath10k_ce_alloc_rri); |
|---|
| 1924 | 2026 | |
|---|