.. | .. |
---|
26 | 26 | * Created on: Aug 30, 2016 |
---|
27 | 27 | * Author: agrodzov |
---|
28 | 28 | */ |
---|
| 29 | + |
---|
| 30 | +#include <linux/delay.h> |
---|
| 31 | + |
---|
29 | 32 | #include "dm_services.h" |
---|
30 | 33 | #include <stdarg.h> |
---|
31 | 34 | |
---|
32 | | -uint32_t generic_reg_update_ex(const struct dc_context *ctx, |
---|
33 | | - uint32_t addr, uint32_t reg_val, int n, |
---|
| 35 | +#include "dc.h" |
---|
| 36 | +#include "dc_dmub_srv.h" |
---|
| 37 | + |
---|
| 38 | +static inline void submit_dmub_read_modify_write( |
---|
| 39 | + struct dc_reg_helper_state *offload, |
---|
| 40 | + const struct dc_context *ctx) |
---|
| 41 | +{ |
---|
| 42 | + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; |
---|
| 43 | + bool gather = false; |
---|
| 44 | + |
---|
| 45 | + offload->should_burst_write = |
---|
| 46 | + (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); |
---|
| 47 | + cmd_buf->header.payload_bytes = |
---|
| 48 | + sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; |
---|
| 49 | + |
---|
| 50 | + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; |
---|
| 51 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; |
---|
| 52 | + |
---|
| 53 | + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); |
---|
| 54 | + |
---|
| 55 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; |
---|
| 56 | + |
---|
| 57 | + memset(cmd_buf, 0, sizeof(*cmd_buf)); |
---|
| 58 | + |
---|
| 59 | + offload->reg_seq_count = 0; |
---|
| 60 | + offload->same_addr_count = 0; |
---|
| 61 | +} |
---|
| 62 | + |
---|
| 63 | +static inline void submit_dmub_burst_write( |
---|
| 64 | + struct dc_reg_helper_state *offload, |
---|
| 65 | + const struct dc_context *ctx) |
---|
| 66 | +{ |
---|
| 67 | + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; |
---|
| 68 | + bool gather = false; |
---|
| 69 | + |
---|
| 70 | + cmd_buf->header.payload_bytes = |
---|
| 71 | + sizeof(uint32_t) * offload->reg_seq_count; |
---|
| 72 | + |
---|
| 73 | + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; |
---|
| 74 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; |
---|
| 75 | + |
---|
| 76 | + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); |
---|
| 77 | + |
---|
| 78 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; |
---|
| 79 | + |
---|
| 80 | + memset(cmd_buf, 0, sizeof(*cmd_buf)); |
---|
| 81 | + |
---|
| 82 | + offload->reg_seq_count = 0; |
---|
| 83 | +} |
---|
| 84 | + |
---|
| 85 | +static inline void submit_dmub_reg_wait( |
---|
| 86 | + struct dc_reg_helper_state *offload, |
---|
| 87 | + const struct dc_context *ctx) |
---|
| 88 | +{ |
---|
| 89 | + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; |
---|
| 90 | + bool gather = false; |
---|
| 91 | + |
---|
| 92 | + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; |
---|
| 93 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; |
---|
| 94 | + |
---|
| 95 | + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); |
---|
| 96 | + |
---|
| 97 | + memset(cmd_buf, 0, sizeof(*cmd_buf)); |
---|
| 98 | + offload->reg_seq_count = 0; |
---|
| 99 | + |
---|
| 100 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; |
---|
| 101 | +} |
---|
| 102 | + |
---|
/* Accumulated update for one register: "value" holds the new bits already
 * shifted into field position, "mask" marks which bits of the register are
 * being modified. Bits outside "mask" are preserved on write-back.
 */
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};
---|
| 107 | + |
---|
/* One register update step: target address plus its gathered value/mask.
 * NOTE(review): not referenced by any code visible in this file chunk —
 * confirm it is used elsewhere before removing.
 */
struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};
---|
| 112 | + |
---|
| 113 | +static inline void set_reg_field_value_masks( |
---|
| 114 | + struct dc_reg_value_masks *field_value_mask, |
---|
| 115 | + uint32_t value, |
---|
| 116 | + uint32_t mask, |
---|
| 117 | + uint8_t shift) |
---|
| 118 | +{ |
---|
| 119 | + ASSERT(mask != 0); |
---|
| 120 | + |
---|
| 121 | + field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift)); |
---|
| 122 | + field_value_mask->mask = field_value_mask->mask | mask; |
---|
| 123 | +} |
---|
| 124 | + |
---|
/*
 * Gather all (shift, mask, value) triples for one register update into
 * @field_value_mask. The first field is passed explicitly; the remaining
 * n-1 fields are consumed from @ap as uint32_t triples in the same order.
 * @addr is currently unused here; the caller resolves the address.
 */
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	int i;

	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
	}
}
---|
| 147 | + |
---|
/* Flush the pending read-modify-write sequence to DMUB and kick execution. */
static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
---|
| 155 | + |
---|
/* Flush the pending burst-write command to DMUB and kick execution. */
static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
---|
| 163 | + |
---|
/*
 * Try to pack a register write into the current DMUB burst-write buffer.
 *
 * Returns true when @reg_val was appended to the burst. Returns false when
 * a burst is in progress for a DIFFERENT address: a burst command targets a
 * single address, so the existing burst is flushed and the caller must fall
 * back to read-modify-write packing for this write.
 */
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	/* address mismatch: flush current burst, reject this value */
	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}
---|
| 188 | + |
---|
/*
 * Pack one register update into the DMUB command buffer instead of doing an
 * MMIO write. Prefers the burst-write form when a run of same-address writes
 * has been detected (should_burst_write); otherwise appends a read-modify-
 * write sequence entry. Returns the value that would have been written, so
 * callers can keep their usual "reg_val" bookkeeping.
 */
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			/* burst rejected (address changed); fall through to RMW */
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	/* track consecutive same-address writes to trigger burst mode later */
	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
---|
| 227 | + |
---|
/*
 * Pack a register-wait command for DMUB: poll @addr until the bits selected
 * by @mask equal (@condition_value << @shift) & @mask, or @time_out_us
 * elapses. Only fills the command buffer; submission happens later in
 * reg_sequence_start_execute().
 */
static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}
---|
| 241 | + |
---|
| 242 | +uint32_t generic_reg_update_ex(const struct dc_context *ctx, |
---|
| 243 | + uint32_t addr, int n, |
---|
| 244 | + uint8_t shift1, uint32_t mask1, uint32_t field_value1, |
---|
| 245 | + ...) |
---|
| 246 | +{ |
---|
| 247 | + struct dc_reg_value_masks field_value_mask = {0}; |
---|
| 248 | + uint32_t reg_val; |
---|
| 249 | + va_list ap; |
---|
| 250 | + |
---|
| 251 | + va_start(ap, field_value1); |
---|
| 252 | + |
---|
| 253 | + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, |
---|
| 254 | + field_value1, ap); |
---|
| 255 | + |
---|
| 256 | + va_end(ap); |
---|
| 257 | + |
---|
| 258 | + if (ctx->dmub_srv && |
---|
| 259 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress) |
---|
| 260 | + return dmub_reg_value_pack(ctx, addr, &field_value_mask); |
---|
| 261 | + /* todo: return void so we can decouple code running in driver from register states */ |
---|
| 262 | + |
---|
| 263 | + /* mmio write directly */ |
---|
| 264 | + reg_val = dm_read_reg(ctx, addr); |
---|
| 265 | + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; |
---|
| 266 | + dm_write_reg(ctx, addr, reg_val); |
---|
| 267 | + return reg_val; |
---|
| 268 | +} |
---|
| 269 | + |
---|
| 270 | +uint32_t generic_reg_set_ex(const struct dc_context *ctx, |
---|
| 271 | + uint32_t addr, uint32_t reg_val, int n, |
---|
| 272 | + uint8_t shift1, uint32_t mask1, uint32_t field_value1, |
---|
| 273 | + ...) |
---|
| 274 | +{ |
---|
| 275 | + struct dc_reg_value_masks field_value_mask = {0}; |
---|
| 276 | + va_list ap; |
---|
| 277 | + |
---|
| 278 | + va_start(ap, field_value1); |
---|
| 279 | + |
---|
| 280 | + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, |
---|
| 281 | + field_value1, ap); |
---|
| 282 | + |
---|
| 283 | + va_end(ap); |
---|
| 284 | + |
---|
| 285 | + |
---|
| 286 | + /* mmio write directly */ |
---|
| 287 | + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; |
---|
| 288 | + |
---|
| 289 | + if (ctx->dmub_srv && |
---|
| 290 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { |
---|
| 291 | + return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); |
---|
| 292 | + /* todo: return void so we can decouple code running in driver from register states */ |
---|
52 | 293 | } |
---|
53 | 294 | |
---|
54 | 295 | dm_write_reg(ctx, addr, reg_val); |
---|
55 | | - va_end(ap); |
---|
56 | | - |
---|
57 | 296 | return reg_val; |
---|
| 297 | +} |
---|
| 298 | + |
---|
/*
 * Read a register via CGS, with tracing. @func_name identifies the caller
 * for diagnostics.
 *
 * A read while DMUB offload gathering is active (and not in the burst-write
 * run, which never needs reads) is a caller bug: packed sequences cannot
 * return read values, so this asserts and returns 0.
 */
uint32_t dm_read_reg_func(
	const struct dc_context *ctx,
	uint32_t address,
	const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
---|
59 | 324 | |
---|
60 | 325 | uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr, |
---|
.. | .. |
---|
207 | 472 | } |
---|
208 | 473 | */ |
---|
209 | 474 | |
---|
210 | | -uint32_t generic_reg_wait(const struct dc_context *ctx, |
---|
| 475 | +void generic_reg_wait(const struct dc_context *ctx, |
---|
211 | 476 | uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value, |
---|
212 | 477 | unsigned int delay_between_poll_us, unsigned int time_out_num_tries, |
---|
213 | 478 | const char *func_name, int line) |
---|
.. | .. |
---|
216 | 481 | uint32_t reg_val; |
---|
217 | 482 | int i; |
---|
218 | 483 | |
---|
219 | | - /* something is terribly wrong if time out is > 200ms. (5Hz) */ |
---|
220 | | - ASSERT(delay_between_poll_us * time_out_num_tries <= 200000); |
---|
221 | | - |
---|
222 | | - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { |
---|
223 | | - /* 35 seconds */ |
---|
224 | | - delay_between_poll_us = 35000; |
---|
225 | | - time_out_num_tries = 1000; |
---|
| 484 | + if (ctx->dmub_srv && |
---|
| 485 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { |
---|
| 486 | + dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, |
---|
| 487 | + delay_between_poll_us * time_out_num_tries); |
---|
| 488 | + return; |
---|
226 | 489 | } |
---|
| 490 | + |
---|
| 491 | + /* |
---|
| 492 | + * Something is terribly wrong if time out is > 3000ms. |
---|
| 493 | + * 3000ms is the maximum time needed for SMU to pass values back. |
---|
| 494 | + * This value comes from experiments. |
---|
| 495 | + * |
---|
| 496 | + */ |
---|
| 497 | + ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); |
---|
227 | 498 | |
---|
228 | 499 | for (i = 0; i <= time_out_num_tries; i++) { |
---|
229 | 500 | if (i) { |
---|
.. | .. |
---|
238 | 509 | field_value = get_reg_field_value_ex(reg_val, mask, shift); |
---|
239 | 510 | |
---|
240 | 511 | if (field_value == condition_value) { |
---|
241 | | - if (i * delay_between_poll_us > 1000) |
---|
242 | | - dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n", |
---|
| 512 | + if (i * delay_between_poll_us > 1000 && |
---|
| 513 | + !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) |
---|
| 514 | + DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", |
---|
243 | 515 | delay_between_poll_us * i / 1000, |
---|
244 | 516 | func_name, line); |
---|
245 | | - return reg_val; |
---|
| 517 | + return; |
---|
246 | 518 | } |
---|
247 | 519 | } |
---|
248 | 520 | |
---|
249 | | - dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n", |
---|
| 521 | + DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n", |
---|
250 | 522 | delay_between_poll_us, time_out_num_tries, |
---|
251 | 523 | func_name, line); |
---|
252 | 524 | |
---|
253 | 525 | if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) |
---|
254 | 526 | BREAK_TO_DEBUGGER(); |
---|
255 | | - |
---|
256 | | - return reg_val; |
---|
257 | 527 | } |
---|
258 | 528 | |
---|
259 | 529 | void generic_write_indirect_reg(const struct dc_context *ctx, |
---|
.. | .. |
---|
270 | 540 | { |
---|
271 | 541 | uint32_t value = 0; |
---|
272 | 542 | |
---|
| 543 | + // when reg read, there should not be any offload. |
---|
| 544 | + if (ctx->dmub_srv && |
---|
| 545 | + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { |
---|
| 546 | + ASSERT(false); |
---|
| 547 | + } |
---|
| 548 | + |
---|
273 | 549 | dm_write_reg(ctx, addr_index, index); |
---|
274 | 550 | value = dm_read_reg(ctx, addr_data); |
---|
275 | 551 | |
---|
276 | 552 | return value; |
---|
277 | 553 | } |
---|
278 | 554 | |
---|
/*
 * Read an indirect register and extract @n fields from it. Variadic
 * arguments after @field_value1 are (shift, mask, uint32_t *out) triples
 * for the remaining n-1 fields; each extracted field is stored through its
 * output pointer. Returns the raw register value.
 */
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t value;
	int i;
	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t *field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
	}

	va_end(ap);

	return value;
}
---|
279 | 585 | |
---|
280 | 586 | uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, |
---|
281 | 587 | uint32_t addr_index, uint32_t addr_data, |
---|
.. | .. |
---|
306 | 612 | |
---|
307 | 613 | return reg_val; |
---|
308 | 614 | } |
---|
| 615 | + |
---|
| 616 | +void reg_sequence_start_gather(const struct dc_context *ctx) |
---|
| 617 | +{ |
---|
| 618 | + /* if reg sequence is supported and enabled, set flag to |
---|
| 619 | + * indicate we want to have REG_SET, REG_UPDATE macro build |
---|
| 620 | + * reg sequence command buffer rather than MMIO directly. |
---|
| 621 | + */ |
---|
| 622 | + |
---|
| 623 | + if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { |
---|
| 624 | + struct dc_reg_helper_state *offload = |
---|
| 625 | + &ctx->dmub_srv->reg_helper_offload; |
---|
| 626 | + |
---|
| 627 | + /* caller sequence mismatch. need to debug caller. offload will not work!!! */ |
---|
| 628 | + ASSERT(!offload->gather_in_progress); |
---|
| 629 | + |
---|
| 630 | + offload->gather_in_progress = true; |
---|
| 631 | + } |
---|
| 632 | +} |
---|
| 633 | + |
---|
| 634 | +void reg_sequence_start_execute(const struct dc_context *ctx) |
---|
| 635 | +{ |
---|
| 636 | + struct dc_reg_helper_state *offload; |
---|
| 637 | + |
---|
| 638 | + if (!ctx->dmub_srv) |
---|
| 639 | + return; |
---|
| 640 | + |
---|
| 641 | + offload = &ctx->dmub_srv->reg_helper_offload; |
---|
| 642 | + |
---|
| 643 | + if (offload && offload->gather_in_progress) { |
---|
| 644 | + offload->gather_in_progress = false; |
---|
| 645 | + offload->should_burst_write = false; |
---|
| 646 | + switch (offload->cmd_data.cmd_common.header.type) { |
---|
| 647 | + case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: |
---|
| 648 | + submit_dmub_read_modify_write(offload, ctx); |
---|
| 649 | + break; |
---|
| 650 | + case DMUB_CMD__REG_REG_WAIT: |
---|
| 651 | + submit_dmub_reg_wait(offload, ctx); |
---|
| 652 | + break; |
---|
| 653 | + case DMUB_CMD__REG_SEQ_BURST_WRITE: |
---|
| 654 | + submit_dmub_burst_write(offload, ctx); |
---|
| 655 | + break; |
---|
| 656 | + default: |
---|
| 657 | + return; |
---|
| 658 | + } |
---|
| 659 | + |
---|
| 660 | + dc_dmub_srv_cmd_execute(ctx->dmub_srv); |
---|
| 661 | + } |
---|
| 662 | +} |
---|
| 663 | + |
---|
| 664 | +void reg_sequence_wait_done(const struct dc_context *ctx) |
---|
| 665 | +{ |
---|
| 666 | + /* callback to DM to poll for last submission done*/ |
---|
| 667 | + struct dc_reg_helper_state *offload; |
---|
| 668 | + |
---|
| 669 | + if (!ctx->dmub_srv) |
---|
| 670 | + return; |
---|
| 671 | + |
---|
| 672 | + offload = &ctx->dmub_srv->reg_helper_offload; |
---|
| 673 | + |
---|
| 674 | + if (offload && |
---|
| 675 | + ctx->dc->debug.dmub_offload_enabled && |
---|
| 676 | + !ctx->dc->debug.dmcub_emulation) { |
---|
| 677 | + dc_dmub_srv_wait_idle(ctx->dmub_srv); |
---|
| 678 | + } |
---|
| 679 | +} |
---|