// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#define TILE_FUSE_ENABLE_BOTH	0x0
#define TILE_SKU_BOTH_MTL	0x3630

/* Work point configuration values */
#define CONFIG_1_TILE	0x01
#define CONFIG_2_TILE	0x02
#define PLL_RATIO_5_3	0x01
#define PLL_RATIO_4_3	0x02
/* Workpoint request encoding: tile config in bits [15:8], PLL ratio in bits [7:0] */
#define WP_CONFIG(tile, ratio)	(((tile) << 8) | (ratio))
#define WP_CONFIG_1_TILE_5_3_RATIO	WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_1_TILE_4_3_RATIO	WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_2_TILE_5_3_RATIO	WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_2_TILE_4_3_RATIO	WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_0_TILE_PLL_OFF	WP_CONFIG(0, 0)

#define PLL_REF_CLK_FREQ	(50 * 1000000)
#define PLL_SIMULATION_FREQ	(10 * 1000000)
#define PLL_PROF_CLK_FREQ	(38400 * 1000)
#define PLL_DEFAULT_EPP_VALUE	0x80

/* Magic value that unlocks writes to the watchdog timer registers */
#define TIM_SAFE_ENABLE		0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE	0xffffffff

#define TIMEOUT_US		(150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US	(5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US		(1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US		(5 * USEC_PER_MSEC)

/* VPU-core interrupt sources handled by the irqV handler (ICB status word 0) */
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

/* Combined 64-bit mask: ICB status word 1 in the high half, word 0 in the low half */
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)

/* Buttress interrupt sources handled by the irqB handler */
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))

#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
			       (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))

/* LOCAL_INT_MASK is a mask register: 0 bits enable, hence the inversion here */
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)

#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

/*
 * Initialize per-device workaround flags. The interrupt_clear_with_0 quirk is
 * probed at runtime: if writing all-1s to the buttress interrupt status
 * register does not clear it, the register must be cleared by writing 0s.
 */
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;
	vdev->wa.d3hot_after_power_off = true;

	REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		vdev->wa.interrupt_clear_with_0 = true;
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	}

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(d3hot_after_power_off);
	IVPU_PRINT_WA(interrupt_clear_with_0);
}

/* Set default software timeouts (milliseconds) used by the generic driver code */
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
	vdev->timeout.boot = 1000;
	vdev->timeout.jsm = 500;
	vdev->timeout.tdr = 2000;
	vdev->timeout.reschedule_suspend = 10;
	vdev->timeout.autosuspend = 10;
	vdev->timeout.d0i3_entry_msg = 5;
}

/* Wait until the previous workpoint request has been consumed (SEND bit clears) */
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

/* Send KMD initiated workpoint change */
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
			     u16 target_ratio, u16 config)
{
	int ret;
	u32 val;

	ret = ivpu_pll_wait_for_cmd_send(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);

	/* Latch the payloads by setting SEND; hardware clears it when done */
	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);

	ret = ivpu_pll_wait_for_cmd_send(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);

	return ret;
}

/* Poll for PLL lock (enable) or unlock (disable); skipped when punit is disabled */
static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}

/*
 * Read the fused min/max/PN frequency ratios and clamp the module-parameter
 * requested ratios (ivpu_pll_min_ratio/ivpu_pll_max_ratio) into the fused range.
 */
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
	fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
	fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

/* Wait for the AON reset bit to clear, indicating the VPU IP BAR is accessible */
static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}

/*
 * Drive the PLL up (enable=true: request the configured workpoint and wait for
 * lock/ready) or down (enable=false: request ratio 0 / config 0 and wait for
 * unlock). Returns 0 on success or a negative errno on timeout/failure.
 */
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u16 target_ratio;
	u16 config;
	int ret;

	if (IVPU_WA(punit_disabled)) {
		ivpu_dbg(vdev, PM, "Skipping PLL request\n");
		return 0;
	}

	if (enable) {
		target_ratio = hw->pll.pn_ratio;
		config = hw->config;
	} else {
		target_ratio = 0;
		config = 0;
	}

	ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
		 config, target_ratio);

	ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
	if (ret) {
		ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
		return ret;
	}

	ret = ivpu_pll_wait_for_lock(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for PLL lock\n");
		return ret;
	}

	if (enable) {
		ret = ivpu_pll_wait_for_status_ready(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
			return ret;
		}

		ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
			return ret;
		}
	}

	return 0;
}

static int ivpu_pll_enable(struct ivpu_device *vdev)
{
	return ivpu_pll_drive(vdev, true);
}

static int ivpu_pll_disable(struct ivpu_device *vdev)
{
	return ivpu_pll_drive(vdev, false);
}

/* De-assert (clear) the TOP_NOC/DSS_MAS/MSS_MAS resets via the RST_CLR register */
static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}

static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}

/* Gate/ungate the TOP_NOC/DSS_MAS/MSS_MAS clocks in the host subsystem CPR */
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}

/*
 * The following *_check helpers verify one step of the NOC quiescence
 * handshake (request / accept / deny); each returns -EIO if the field does
 * not match the expected value.
 */
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

/* De-assert host subsystem resets and verify the NOC request lines are idle */
static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
	ivpu_boot_host_ss_rst_clr_assert(vdev);

	return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}

static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}

/* Request (or release) host-SS AXI access and verify the qacceptn/qdeny handshake */
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);

	ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return ivpu_boot_host_ss_axi_drive(vdev, true);
}

/* Same handshake as axi_drive, but for the TOP NOC CPU_CTRL/HOSTIF_L2CACHE lanes */
static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
	if (enable) {
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}
	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);

	ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
	return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}

static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}

static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
			     exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}

static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}

/*
 * Power up the MSS_CPU island: trickle then full enable, wait for the status
 * bit, verify the TOP NOC is quiescent, then ungate clocks, drop isolation,
 * release reset and mark the DPU active. The ordering of these steps is
 * hardware-mandated; do not reorder.
 */
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	ivpu_boot_pwr_island_trickle_drive(vdev, true);
	ivpu_boot_pwr_island_drive(vdev, true);

	ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
		return ret;
	}

	ivpu_boot_host_ss_clk_drive(vdev, true);
	ivpu_boot_pwr_island_isolation_drive(vdev, false);
	ivpu_boot_host_ss_rst_drive(vdev, true);
	ivpu_boot_dpu_active_drive(vdev, true);

	return ret;
}

/* Reverse of ivpu_boot_pwr_domain_enable: isolate, then power the island down */
static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
	ivpu_boot_dpu_active_drive(vdev, false);
	ivpu_boot_pwr_island_isolation_drive(vdev, true);
	ivpu_boot_pwr_island_trickle_drive(vdev, false);
	ivpu_boot_pwr_island_drive(vdev, false);

	return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}

/*
 * Configure TCU page-table-walk snoop overrides. Note the asymmetry: the AW
 * (write) override is cleared while the AR (read) override is set - this exact
 * combination is intentional; confirm against the 37xx register spec before
 * changing.
 */
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}

/* Mark TBU0/TBU2 read and write streams as carrying valid MMU substream IDs */
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}

/*
 * Kick the LEON RT core: pulse the reset/resume interrupt lines, then program
 * the firmware entry point (512-byte aligned, hence the >> 9 shift) and set
 * DONE to start execution.
 */
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = vdev->fw->entry_point >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
}

/* Request a D0i3 entry (enable=true) or exit, synchronizing on INPROGRESS */
static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

	return ret;
}

/* Populate static HW info: fuses, SKU, workpoint config, address ranges, WAs */
static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
	hw->sku = TILE_SKU_BOTH_MTL;
	hw->config = WP_CONFIG_2_TILE_4_3_RATIO;

	ivpu_pll_init_frequency_ratios(vdev);

	ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
	ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
	ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
	ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);

	vdev->platform = IVPU_PLATFORM_SILICON;
	ivpu_hw_wa_init(vdev);
	ivpu_hw_timeouts_init(vdev);

	return 0;
}

/*
 * Reset the VPU: power down the domain and stop the PLL. Both steps are
 * attempted even if the first fails; any failure yields -EIO.
 */
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_boot_pwr_domain_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable power domain\n");
		ret = -EIO;
	}

	if (ivpu_pll_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable PLL\n");
		ret = -EIO;
	}

	return ret;
}

static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_boot_d0i3_drive(vdev, true);
	if (ret)
		ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

	udelay(5); /* VPU requires 5 us to complete the transition */

	return ret;
}

static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_boot_d0i3_drive(vdev, false);
	if (ret)
		ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

	return ret;
}

/*
 * Full power-up sequence: leave D0i3, start the PLL, configure the host
 * subsystem, power the domain, then enable AXI and TOP NOC access. A D0i3
 * exit failure is only a warning; every later step is fatal.
 */
static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_hw_37xx_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = ivpu_pll_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	/*
	 * The control circuitry for vpu_idle indication logic powers up active.
	 * To ensure unnecessary low power mode signal from LRT during bring up,
	 * KMD disables the circuitry prior to bringing up the Main Power island.
	 */
	ivpu_boot_vpu_idle_gen_disable(vdev);

	ret = ivpu_boot_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}

static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
	ivpu_boot_no_snoop_enable(vdev);
	ivpu_boot_tbu_mmu_enable(vdev);
	ivpu_boot_soc_cpu_boot(vdev);

	return 0;
}

/* Idle means both READY and IDLE set in the buttress VPU status register */
static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
	u32 val;

	if (IVPU_WA(punit_disabled))
		return true;

	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
	return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
	       REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}

static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}

/* Record matching host and VPU timestamps at D0i3 entry for later correlation */
static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
}

/* Power down: snapshot timestamps, reset the VPU, then enter D0i3 */
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_37xx_is_idle(vdev))
		ivpu_warn(vdev, "VPU not idle during power down\n");

	if (ivpu_hw_37xx_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset VPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_37xx_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}

/*
 * Disable the VPU watchdog. Each write to a TIM register must be preceded by
 * a write of TIM_SAFE_ENABLE to unlock it.
 */
static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}

static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
{
	return PLL_PROF_CLK_FREQ;
}

static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}

/*
 * Convert a PLL ratio to the CPU clock frequency in Hz. The divisor depends
 * on the ratio mode encoded in the low byte of the workpoint config
 * (4/3 ratio -> pll*2/4, otherwise 5/3 -> pll*2/5).
 */
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
	u32 cpu_clock;

	if ((config & 0xff) == PLL_RATIO_4_3)
		cpu_clock = pll_clock * 2 / 4;
	else
		cpu_clock = pll_clock * 2 / 5;

	return cpu_clock;
}

/* Register indirect accesses */
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
	u32 pll_curr_ratio;

	pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
	pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;

	/* Non-silicon platforms report a fixed simulated frequency */
	if (!ivpu_is_silicon(vdev))
		return PLL_SIMULATION_FREQ;

	return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}

static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}

static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}

static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}

/* Ring doorbell db_id; doorbell registers are laid out at a fixed stride */
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
	/* _SILENT read: also used from the failure-diagnosis path */
	u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}

static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
	REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}

static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}

static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
	/* Mirror of irq_enable in reverse order: mask globally first */
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}

static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}

static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}

/* Handler for IRQs from VPU core (irqV) */
static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

	if (!status)
		return false;

	/* Ack all pending sources up front, then dispatch per bit */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev, wake_thread);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		ivpu_hw_37xx_irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		ivpu_hw_37xx_irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);

	return true;
}

/* Handler for IRQs from Buttress core (irqB) */
static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
			 REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
		REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

/* Top-level ISR: mask global interrupts, run both sub-handlers, re-enable */
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool irqv_handled, irqb_handled, wake_thread = false;

	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);

	irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
	irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);

	if (wake_thread)
		return IRQ_WAKE_THREAD;
	if (irqv_handled || irqb_handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}

/* Dump likely causes of a hang/boot failure from the raw IRQ status registers */
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
	u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
	u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

	if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
		ivpu_err(vdev, "WDT MSS timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
		ivpu_err(vdev, "WDT NCE timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
		ivpu_err(vdev, "NOC Firewall irq detected\n");

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
	}
}

/* 37xx (MTL) implementation of the generic ivpu HW ops vtable */
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
	.info_init = ivpu_hw_37xx_info_init,
	.power_up = ivpu_hw_37xx_power_up,
	.is_idle = ivpu_hw_37xx_is_idle,
	.wait_for_idle = ivpu_hw_37xx_wait_for_idle,
	.power_down = ivpu_hw_37xx_power_down,
	.reset = ivpu_hw_37xx_reset,
	.boot_fw = ivpu_hw_37xx_boot_fw,
	.wdt_disable = ivpu_hw_37xx_wdt_disable,
	.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
	.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
	.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
	.reg_db_set = ivpu_hw_37xx_reg_db_set,
	.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
	.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
	.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
	.irq_clear = ivpu_hw_37xx_irq_clear,
	.irq_enable = ivpu_hw_37xx_irq_enable,
	.irq_disable = ivpu_hw_37xx_irq_disable,
	.irq_handler = ivpu_hw_37xx_irq_handler,
};