1 /* 2 * Linux network driver for QLogic BR-series Converged Network Adapter. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License (GPL) Version 2 as 6 * published by the Free Software Foundation 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 /* 14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 15 * Copyright (c) 2014-2015 QLogic Corporation 16 * All rights reserved 17 * www.qlogic.com 18 */ 19 20 #include "bfa_ioc.h" 21 #include "bfi_reg.h" 22 #include "bfa_defs.h" 23 24 /* IOC local definitions */ 25 26 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ 27 28 #define bfa_ioc_firmware_lock(__ioc) \ 29 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 30 #define bfa_ioc_firmware_unlock(__ioc) \ 31 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 32 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 33 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 34 #define bfa_ioc_notify_fail(__ioc) \ 35 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 36 #define bfa_ioc_sync_start(__ioc) \ 37 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) 38 #define bfa_ioc_sync_join(__ioc) \ 39 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 40 #define bfa_ioc_sync_leave(__ioc) \ 41 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) 42 #define bfa_ioc_sync_ack(__ioc) \ 43 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) 44 #define bfa_ioc_sync_complete(__ioc) \ 45 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) 46 #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ 47 ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) 48 #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ 49 ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) 50 #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ 51 ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) 52 53 static bool bfa_nw_auto_recover = true; 54 55 /* 56 * forward declarations 57 */ 58 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc); 59 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc); 60 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc); 61 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force); 62 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc); 63 static void bfa_ioc_send_enable(struct bfa_ioc *ioc); 64 static void bfa_ioc_send_disable(struct bfa_ioc *ioc); 65 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc); 66 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc); 67 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc); 68 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); 69 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); 70 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); 71 static void bfa_ioc_recover(struct bfa_ioc *ioc); 72 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); 73 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 74 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 75 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc); 76 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); 77 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); 78 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); 79 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); 80 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); 81 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); 82 static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, 83 enum bfi_fwboot_type boot_type, u32 boot_param); 84 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 85 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, 86 char *serial_num); 87 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, 88 char *fw_ver); 89 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, 90 char *chip_rev); 91 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, 92 char *optrom_ver); 93 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, 94 char *manufacturer); 95 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 96 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 97 98 /* IOC state machine definitions/declarations */ 99 enum ioc_event { 100 IOC_E_RESET = 1, /*!< IOC reset request */ 101 IOC_E_ENABLE = 2, /*!< IOC enable request */ 102 IOC_E_DISABLE = 3, /*!< IOC disable request */ 103 IOC_E_DETACH = 4, /*!< driver detach cleanup */ 104 IOC_E_ENABLED = 5, /*!< f/w enabled */ 105 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ 106 IOC_E_DISABLED = 7, /*!< f/w disabled */ 107 IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */ 108 IOC_E_HBFAIL = 9, /*!< heartbeat failure */ 109 IOC_E_HWERROR = 10, /*!< hardware error interrupt */ 110 IOC_E_TIMEOUT = 11, /*!< timeout */ 111 IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */ 112 }; 113 114 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); 115 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); 116 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); 117 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); 118 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); 119 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); 120 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); 121 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); 122 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); 123 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event); 124 125 static struct bfa_sm_table ioc_sm_table[] = { 126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, 127 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 128 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, 129 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 130 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 131 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, 132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, 133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 135 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, 136 }; 137 138 /* 139 * Forward declareations for iocpf state machine 140 */ 141 static void bfa_iocpf_enable(struct bfa_ioc *ioc); 142 static void bfa_iocpf_disable(struct bfa_ioc *ioc); 143 static void bfa_iocpf_fail(struct bfa_ioc *ioc); 144 static void bfa_iocpf_initfail(struct bfa_ioc *ioc); 145 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); 146 static void bfa_iocpf_stop(struct bfa_ioc *ioc); 147 148 /* IOCPF state machine events */ 149 enum iocpf_event { 150 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ 151 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ 152 IOCPF_E_STOP = 3, /*!< stop on driver detach */ 153 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ 154 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ 155 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ 156 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ 157 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ 158 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ 159 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ 160 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ 161 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ 162 }; 163 164 /* IOCPF states */ 165 enum bfa_iocpf_state { 166 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ 167 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 168 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ 169 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ 170 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ 171 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ 172 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ 173 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ 174 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ 175 }; 176 177 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); 178 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); 179 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); 180 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); 181 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); 182 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); 183 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); 184 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, 185 enum iocpf_event); 186 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); 187 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); 188 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); 189 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); 190 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, 191 enum iocpf_event); 192 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); 193 194 static struct bfa_sm_table iocpf_sm_table[] = { 195 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, 196 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, 197 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, 198 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, 199 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, 200 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, 201 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, 202 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, 203 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, 204 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, 205 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, 206 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, 207 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, 208 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 209 }; 210 211 /* IOC State Machine */ 212 213 /* Beginning state. IOC uninit state. */ 214 static void 215 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) 216 { 217 } 218 219 /* IOC is in uninit state. */ 220 static void 221 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) 222 { 223 switch (event) { 224 case IOC_E_RESET: 225 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 226 break; 227 228 default: 229 bfa_sm_fault(event); 230 } 231 } 232 233 /* Reset entry actions -- initialize state machine */ 234 static void 235 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 236 { 237 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 238 } 239 240 /* IOC is in reset state. */ 241 static void 242 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 243 { 244 switch (event) { 245 case IOC_E_ENABLE: 246 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 247 break; 248 249 case IOC_E_DISABLE: 250 bfa_ioc_disable_comp(ioc); 251 break; 252 253 case IOC_E_DETACH: 254 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 255 break; 256 257 default: 258 bfa_sm_fault(event); 259 } 260 } 261 262 static void 263 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) 264 { 265 bfa_iocpf_enable(ioc); 266 } 267 268 /* Host IOC function is being enabled, awaiting response from firmware. 269 * Semaphore is acquired. 270 */ 271 static void 272 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) 273 { 274 switch (event) { 275 case IOC_E_ENABLED: 276 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 277 break; 278 279 case IOC_E_PFFAILED: 280 /* !!! fall through !!! */ 281 case IOC_E_HWERROR: 282 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 283 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 284 if (event != IOC_E_PFFAILED) 285 bfa_iocpf_initfail(ioc); 286 break; 287 288 case IOC_E_HWFAILED: 289 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 290 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); 291 break; 292 293 case IOC_E_DISABLE: 294 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 295 break; 296 297 case IOC_E_DETACH: 298 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 299 bfa_iocpf_stop(ioc); 300 break; 301 302 case IOC_E_ENABLE: 303 break; 304 305 default: 306 bfa_sm_fault(event); 307 } 308 } 309 310 /* Semaphore should be acquired for version check. */ 311 static void 312 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 313 { 314 mod_timer(&ioc->ioc_timer, jiffies + 315 msecs_to_jiffies(BFA_IOC_TOV)); 316 bfa_ioc_send_getattr(ioc); 317 } 318 319 /* IOC configuration in progress. Timer is active. */ 320 static void 321 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) 322 { 323 switch (event) { 324 case IOC_E_FWRSP_GETATTR: 325 del_timer(&ioc->ioc_timer); 326 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 327 break; 328 329 case IOC_E_PFFAILED: 330 case IOC_E_HWERROR: 331 del_timer(&ioc->ioc_timer); 332 /* fall through */ 333 case IOC_E_TIMEOUT: 334 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 335 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 336 if (event != IOC_E_PFFAILED) 337 bfa_iocpf_getattrfail(ioc); 338 break; 339 340 case IOC_E_DISABLE: 341 del_timer(&ioc->ioc_timer); 342 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 343 break; 344 345 case IOC_E_ENABLE: 346 break; 347 348 default: 349 bfa_sm_fault(event); 350 } 351 } 352 353 static void 354 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) 355 { 356 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 357 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 358 bfa_ioc_hb_monitor(ioc); 359 } 360 361 static void 362 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) 363 { 364 switch (event) { 365 case IOC_E_ENABLE: 366 break; 367 368 case IOC_E_DISABLE: 369 bfa_ioc_hb_stop(ioc); 370 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 371 break; 372 373 case IOC_E_PFFAILED: 374 case IOC_E_HWERROR: 375 bfa_ioc_hb_stop(ioc); 376 /* !!! fall through !!! */ 377 case IOC_E_HBFAIL: 378 if (ioc->iocpf.auto_recover) 379 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 380 else 381 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 382 383 bfa_ioc_fail_notify(ioc); 384 385 if (event != IOC_E_PFFAILED) 386 bfa_iocpf_fail(ioc); 387 break; 388 389 default: 390 bfa_sm_fault(event); 391 } 392 } 393 394 static void 395 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) 396 { 397 bfa_iocpf_disable(ioc); 398 } 399 400 /* IOC is being disabled */ 401 static void 402 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 403 { 404 switch (event) { 405 case IOC_E_DISABLED: 406 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 407 break; 408 409 case IOC_E_HWERROR: 410 /* 411 * No state change. Will move to disabled state 412 * after iocpf sm completes failure processing and 413 * moves to disabled state. 414 */ 415 bfa_iocpf_fail(ioc); 416 break; 417 418 case IOC_E_HWFAILED: 419 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); 420 bfa_ioc_disable_comp(ioc); 421 break; 422 423 default: 424 bfa_sm_fault(event); 425 } 426 } 427 428 /* IOC disable completion entry. */ 429 static void 430 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) 431 { 432 bfa_ioc_disable_comp(ioc); 433 } 434 435 static void 436 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) 437 { 438 switch (event) { 439 case IOC_E_ENABLE: 440 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 441 break; 442 443 case IOC_E_DISABLE: 444 ioc->cbfn->disable_cbfn(ioc->bfa); 445 break; 446 447 case IOC_E_DETACH: 448 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 449 bfa_iocpf_stop(ioc); 450 break; 451 452 default: 453 bfa_sm_fault(event); 454 } 455 } 456 457 static void 458 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) 459 { 460 } 461 462 /* Hardware initialization retry. */ 463 static void 464 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) 465 { 466 switch (event) { 467 case IOC_E_ENABLED: 468 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 469 break; 470 471 case IOC_E_PFFAILED: 472 case IOC_E_HWERROR: 473 /** 474 * Initialization retry failed. 475 */ 476 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 477 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 478 if (event != IOC_E_PFFAILED) 479 bfa_iocpf_initfail(ioc); 480 break; 481 482 case IOC_E_HWFAILED: 483 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 484 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); 485 break; 486 487 case IOC_E_ENABLE: 488 break; 489 490 case IOC_E_DISABLE: 491 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 492 break; 493 494 case IOC_E_DETACH: 495 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 496 bfa_iocpf_stop(ioc); 497 break; 498 499 default: 500 bfa_sm_fault(event); 501 } 502 } 503 504 static void 505 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) 506 { 507 } 508 509 /* IOC failure. */ 510 static void 511 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) 512 { 513 switch (event) { 514 case IOC_E_ENABLE: 515 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 516 break; 517 518 case IOC_E_DISABLE: 519 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 520 break; 521 522 case IOC_E_DETACH: 523 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 524 bfa_iocpf_stop(ioc); 525 break; 526 527 case IOC_E_HWERROR: 528 /* HB failure notification, ignore. */ 529 break; 530 531 default: 532 bfa_sm_fault(event); 533 } 534 } 535 536 static void 537 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) 538 { 539 } 540 541 /* IOC failure. */ 542 static void 543 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) 544 { 545 switch (event) { 546 547 case IOC_E_ENABLE: 548 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 549 break; 550 551 case IOC_E_DISABLE: 552 ioc->cbfn->disable_cbfn(ioc->bfa); 553 break; 554 555 case IOC_E_DETACH: 556 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 557 break; 558 559 default: 560 bfa_sm_fault(event); 561 } 562 } 563 564 /* IOCPF State Machine */ 565 566 /* Reset entry actions -- initialize state machine */ 567 static void 568 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) 569 { 570 iocpf->fw_mismatch_notified = false; 571 iocpf->auto_recover = bfa_nw_auto_recover; 572 } 573 574 /* Beginning state. IOC is in reset state. */ 575 static void 576 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) 577 { 578 switch (event) { 579 case IOCPF_E_ENABLE: 580 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); 581 break; 582 583 case IOCPF_E_STOP: 584 break; 585 586 default: 587 bfa_sm_fault(event); 588 } 589 } 590 591 /* Semaphore should be acquired for version check. */ 592 static void 593 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) 594 { 595 bfa_ioc_hw_sem_init(iocpf->ioc); 596 bfa_ioc_hw_sem_get(iocpf->ioc); 597 } 598 599 /* Awaiting h/w semaphore to continue with version check. */ 600 static void 601 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) 602 { 603 struct bfa_ioc *ioc = iocpf->ioc; 604 605 switch (event) { 606 case IOCPF_E_SEMLOCKED: 607 if (bfa_ioc_firmware_lock(ioc)) { 608 if (bfa_ioc_sync_start(ioc)) { 609 bfa_ioc_sync_join(ioc); 610 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 611 } else { 612 bfa_ioc_firmware_unlock(ioc); 613 bfa_nw_ioc_hw_sem_release(ioc); 614 mod_timer(&ioc->sem_timer, jiffies + 615 msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); 616 } 617 } else { 618 bfa_nw_ioc_hw_sem_release(ioc); 619 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); 620 } 621 break; 622 623 case IOCPF_E_SEM_ERROR: 624 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 625 bfa_ioc_pf_hwfailed(ioc); 626 break; 627 628 case IOCPF_E_DISABLE: 629 bfa_ioc_hw_sem_get_cancel(ioc); 630 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 631 bfa_ioc_pf_disabled(ioc); 632 break; 633 634 case IOCPF_E_STOP: 635 bfa_ioc_hw_sem_get_cancel(ioc); 636 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 637 break; 638 639 default: 640 bfa_sm_fault(event); 641 } 642 } 643 644 /* Notify enable completion callback */ 645 static void 646 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) 647 { 648 /* Call only the first time sm enters fwmismatch state. */ 649 if (!iocpf->fw_mismatch_notified) 650 bfa_ioc_pf_fwmismatch(iocpf->ioc); 651 652 iocpf->fw_mismatch_notified = true; 653 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 654 msecs_to_jiffies(BFA_IOC_TOV)); 655 } 656 657 /* Awaiting firmware version match. */ 658 static void 659 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) 660 { 661 struct bfa_ioc *ioc = iocpf->ioc; 662 663 switch (event) { 664 case IOCPF_E_TIMEOUT: 665 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); 666 break; 667 668 case IOCPF_E_DISABLE: 669 del_timer(&ioc->iocpf_timer); 670 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 671 bfa_ioc_pf_disabled(ioc); 672 break; 673 674 case IOCPF_E_STOP: 675 del_timer(&ioc->iocpf_timer); 676 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 677 break; 678 679 default: 680 bfa_sm_fault(event); 681 } 682 } 683 684 /* Request for semaphore. */ 685 static void 686 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) 687 { 688 bfa_ioc_hw_sem_get(iocpf->ioc); 689 } 690 691 /* Awaiting semaphore for h/w initialzation. */ 692 static void 693 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) 694 { 695 struct bfa_ioc *ioc = iocpf->ioc; 696 697 switch (event) { 698 case IOCPF_E_SEMLOCKED: 699 if (bfa_ioc_sync_complete(ioc)) { 700 bfa_ioc_sync_join(ioc); 701 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 702 } else { 703 bfa_nw_ioc_hw_sem_release(ioc); 704 mod_timer(&ioc->sem_timer, jiffies + 705 msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); 706 } 707 break; 708 709 case IOCPF_E_SEM_ERROR: 710 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 711 bfa_ioc_pf_hwfailed(ioc); 712 break; 713 714 case IOCPF_E_DISABLE: 715 bfa_ioc_hw_sem_get_cancel(ioc); 716 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 717 break; 718 719 default: 720 bfa_sm_fault(event); 721 } 722 } 723 724 static void 725 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) 726 { 727 iocpf->poll_time = 0; 728 bfa_ioc_reset(iocpf->ioc, false); 729 } 730 731 /* Hardware is being initialized. Interrupts are enabled. 732 * Holding hardware semaphore lock. 733 */ 734 static void 735 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) 736 { 737 struct bfa_ioc *ioc = iocpf->ioc; 738 739 switch (event) { 740 case IOCPF_E_FWREADY: 741 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); 742 break; 743 744 case IOCPF_E_TIMEOUT: 745 bfa_nw_ioc_hw_sem_release(ioc); 746 bfa_ioc_pf_failed(ioc); 747 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 748 break; 749 750 case IOCPF_E_DISABLE: 751 del_timer(&ioc->iocpf_timer); 752 bfa_ioc_sync_leave(ioc); 753 bfa_nw_ioc_hw_sem_release(ioc); 754 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 755 break; 756 757 default: 758 bfa_sm_fault(event); 759 } 760 } 761 762 static void 763 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) 764 { 765 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 766 msecs_to_jiffies(BFA_IOC_TOV)); 767 /** 768 * Enable Interrupts before sending fw IOC ENABLE cmd. 769 */ 770 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); 771 bfa_ioc_send_enable(iocpf->ioc); 772 } 773 774 /* Host IOC function is being enabled, awaiting response from firmware. 775 * Semaphore is acquired. 776 */ 777 static void 778 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 779 { 780 struct bfa_ioc *ioc = iocpf->ioc; 781 782 switch (event) { 783 case IOCPF_E_FWRSP_ENABLE: 784 del_timer(&ioc->iocpf_timer); 785 bfa_nw_ioc_hw_sem_release(ioc); 786 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); 787 break; 788 789 case IOCPF_E_INITFAIL: 790 del_timer(&ioc->iocpf_timer); 791 /* 792 * !!! fall through !!! 793 */ 794 case IOCPF_E_TIMEOUT: 795 bfa_nw_ioc_hw_sem_release(ioc); 796 if (event == IOCPF_E_TIMEOUT) 797 bfa_ioc_pf_failed(ioc); 798 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 799 break; 800 801 case IOCPF_E_DISABLE: 802 del_timer(&ioc->iocpf_timer); 803 bfa_nw_ioc_hw_sem_release(ioc); 804 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 805 break; 806 807 default: 808 bfa_sm_fault(event); 809 } 810 } 811 812 static void 813 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) 814 { 815 bfa_ioc_pf_enabled(iocpf->ioc); 816 } 817 818 static void 819 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) 820 { 821 switch (event) { 822 case IOCPF_E_DISABLE: 823 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 824 break; 825 826 case IOCPF_E_GETATTRFAIL: 827 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 828 break; 829 830 case IOCPF_E_FAIL: 831 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); 832 break; 833 834 default: 835 bfa_sm_fault(event); 836 } 837 } 838 839 static void 840 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) 841 { 842 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 843 msecs_to_jiffies(BFA_IOC_TOV)); 844 bfa_ioc_send_disable(iocpf->ioc); 845 } 846 847 /* IOC is being disabled */ 848 static void 849 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 850 { 851 struct bfa_ioc *ioc = iocpf->ioc; 852 853 switch (event) { 854 case IOCPF_E_FWRSP_DISABLE: 855 del_timer(&ioc->iocpf_timer); 856 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 857 break; 858 859 case IOCPF_E_FAIL: 860 del_timer(&ioc->iocpf_timer); 861 /* 862 * !!! fall through !!! 863 */ 864 865 case IOCPF_E_TIMEOUT: 866 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 867 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 868 break; 869 870 case IOCPF_E_FWRSP_ENABLE: 871 break; 872 873 default: 874 bfa_sm_fault(event); 875 } 876 } 877 878 static void 879 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) 880 { 881 bfa_ioc_hw_sem_get(iocpf->ioc); 882 } 883 884 /* IOC hb ack request is being removed. */ 885 static void 886 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 887 { 888 struct bfa_ioc *ioc = iocpf->ioc; 889 890 switch (event) { 891 case IOCPF_E_SEMLOCKED: 892 bfa_ioc_sync_leave(ioc); 893 bfa_nw_ioc_hw_sem_release(ioc); 894 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 895 break; 896 897 case IOCPF_E_SEM_ERROR: 898 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 899 bfa_ioc_pf_hwfailed(ioc); 900 break; 901 902 case IOCPF_E_FAIL: 903 break; 904 905 default: 906 bfa_sm_fault(event); 907 } 908 } 909 910 /* IOC disable completion entry. */ 911 static void 912 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) 913 { 914 bfa_ioc_mbox_flush(iocpf->ioc); 915 bfa_ioc_pf_disabled(iocpf->ioc); 916 } 917 918 static void 919 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) 920 { 921 struct bfa_ioc *ioc = iocpf->ioc; 922 923 switch (event) { 924 case IOCPF_E_ENABLE: 925 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 926 break; 927 928 case IOCPF_E_STOP: 929 bfa_ioc_firmware_unlock(ioc); 930 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 931 break; 932 933 default: 934 bfa_sm_fault(event); 935 } 936 } 937 938 static void 939 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) 940 { 941 bfa_nw_ioc_debug_save_ftrc(iocpf->ioc); 942 bfa_ioc_hw_sem_get(iocpf->ioc); 943 } 944 945 /* Hardware initialization failed. */ 946 static void 947 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 948 { 949 struct bfa_ioc *ioc = iocpf->ioc; 950 951 switch (event) { 952 case IOCPF_E_SEMLOCKED: 953 bfa_ioc_notify_fail(ioc); 954 bfa_ioc_sync_leave(ioc); 955 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 956 bfa_nw_ioc_hw_sem_release(ioc); 957 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 958 break; 959 960 case IOCPF_E_SEM_ERROR: 961 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 962 bfa_ioc_pf_hwfailed(ioc); 963 break; 964 965 case IOCPF_E_DISABLE: 966 bfa_ioc_hw_sem_get_cancel(ioc); 967 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 968 break; 969 970 case IOCPF_E_STOP: 971 bfa_ioc_hw_sem_get_cancel(ioc); 972 bfa_ioc_firmware_unlock(ioc); 973 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 974 break; 975 976 case IOCPF_E_FAIL: 977 break; 978 979 default: 980 bfa_sm_fault(event); 981 } 982 } 983 984 static void 985 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) 986 { 987 } 988 989 /* Hardware initialization failed. */ 990 static void 991 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) 992 { 993 struct bfa_ioc *ioc = iocpf->ioc; 994 995 switch (event) { 996 case IOCPF_E_DISABLE: 997 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 998 break; 999 1000 case IOCPF_E_STOP: 1001 bfa_ioc_firmware_unlock(ioc); 1002 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 1003 break; 1004 1005 default: 1006 bfa_sm_fault(event); 1007 } 1008 } 1009 1010 static void 1011 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) 1012 { 1013 /** 1014 * Mark IOC as failed in hardware and stop firmware. 1015 */ 1016 bfa_ioc_lpu_stop(iocpf->ioc); 1017 1018 /** 1019 * Flush any queued up mailbox requests. 1020 */ 1021 bfa_ioc_mbox_flush(iocpf->ioc); 1022 bfa_ioc_hw_sem_get(iocpf->ioc); 1023 } 1024 1025 /* IOC is in failed state. */ 1026 static void 1027 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 1028 { 1029 struct bfa_ioc *ioc = iocpf->ioc; 1030 1031 switch (event) { 1032 case IOCPF_E_SEMLOCKED: 1033 bfa_ioc_sync_ack(ioc); 1034 bfa_ioc_notify_fail(ioc); 1035 if (!iocpf->auto_recover) { 1036 bfa_ioc_sync_leave(ioc); 1037 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 1038 bfa_nw_ioc_hw_sem_release(ioc); 1039 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 1040 } else { 1041 if (bfa_ioc_sync_complete(ioc)) 1042 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 1043 else { 1044 bfa_nw_ioc_hw_sem_release(ioc); 1045 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1046 } 1047 } 1048 break; 1049 1050 case IOCPF_E_SEM_ERROR: 1051 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 1052 bfa_ioc_pf_hwfailed(ioc); 1053 break; 1054 1055 case IOCPF_E_DISABLE: 1056 bfa_ioc_hw_sem_get_cancel(ioc); 1057 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1058 break; 1059 1060 case IOCPF_E_FAIL: 1061 break; 1062 1063 default: 1064 bfa_sm_fault(event); 1065 } 1066 } 1067 1068 static void 1069 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) 1070 { 1071 } 1072 1073 /* IOC is in failed state. */ 1074 static void 1075 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) 1076 { 1077 switch (event) { 1078 case IOCPF_E_DISABLE: 1079 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1080 break; 1081 1082 default: 1083 bfa_sm_fault(event); 1084 } 1085 } 1086 1087 /* BFA IOC private functions */ 1088 1089 /* Notify common modules registered for notification. */ 1090 static void 1091 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) 1092 { 1093 struct bfa_ioc_notify *notify; 1094 1095 list_for_each_entry(notify, &ioc->notify_q, qe) 1096 notify->cbfn(notify->cbarg, event); 1097 } 1098 1099 static void 1100 bfa_ioc_disable_comp(struct bfa_ioc *ioc) 1101 { 1102 ioc->cbfn->disable_cbfn(ioc->bfa); 1103 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); 1104 } 1105 1106 bool 1107 bfa_nw_ioc_sem_get(void __iomem *sem_reg) 1108 { 1109 u32 r32; 1110 int cnt = 0; 1111 #define BFA_SEM_SPINCNT 3000 1112 1113 r32 = readl(sem_reg); 1114 1115 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { 1116 cnt++; 1117 udelay(2); 1118 r32 = readl(sem_reg); 1119 } 1120 1121 if (!(r32 & 1)) 1122 return true; 1123 1124 return false; 1125 } 1126 1127 void 1128 bfa_nw_ioc_sem_release(void __iomem *sem_reg) 1129 { 1130 readl(sem_reg); 1131 writel(1, sem_reg); 1132 } 1133 1134 /* Clear fwver hdr */ 1135 static void 1136 bfa_ioc_fwver_clear(struct bfa_ioc *ioc) 1137 { 1138 u32 pgnum, pgoff, loff = 0; 1139 int i; 1140 1141 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); 1142 pgoff = PSS_SMEM_PGOFF(loff); 1143 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1144 1145 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) { 1146 writel(0, ioc->ioc_regs.smem_page_start + loff); 1147 loff += sizeof(u32); 1148 } 1149 } 1150 1151 1152 static void 1153 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) 1154 { 1155 struct bfi_ioc_image_hdr fwhdr; 1156 u32 fwstate, r32; 1157 1158 /* Spin on init semaphore to serialize. */ 1159 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); 1160 while (r32 & 0x1) { 1161 udelay(20); 1162 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); 1163 } 1164 1165 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); 1166 if (fwstate == BFI_IOC_UNINIT) { 1167 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 1168 return; 1169 } 1170 1171 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1172 1173 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { 1174 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 1175 return; 1176 } 1177 1178 bfa_ioc_fwver_clear(ioc); 1179 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); 1180 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); 1181 1182 /* 1183 * Try to lock and then unlock the semaphore. 1184 */ 1185 readl(ioc->ioc_regs.ioc_sem_reg); 1186 writel(1, ioc->ioc_regs.ioc_sem_reg); 1187 1188 /* Unlock init semaphore */ 1189 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 1190 } 1191 1192 static void 1193 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) 1194 { 1195 u32 r32; 1196 1197 /** 1198 * First read to the semaphore register will return 0, subsequent reads 1199 * will return 1. Semaphore is released by writing 1 to the register 1200 */ 1201 r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1202 if (r32 == ~0) { 1203 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); 1204 return; 1205 } 1206 if (!(r32 & 1)) { 1207 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); 1208 return; 1209 } 1210 1211 mod_timer(&ioc->sem_timer, jiffies + 1212 msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); 1213 } 1214 1215 void 1216 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc) 1217 { 1218 writel(1, ioc->ioc_regs.ioc_sem_reg); 1219 } 1220 1221 static void 1222 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) 1223 { 1224 del_timer(&ioc->sem_timer); 1225 } 1226 1227 /* Initialize LPU local memory (aka secondary memory / SRAM) */ 1228 static void 1229 bfa_ioc_lmem_init(struct bfa_ioc *ioc) 1230 { 1231 u32 pss_ctl; 1232 int i; 1233 #define PSS_LMEM_INIT_TIME 10000 1234 1235 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1236 pss_ctl &= ~__PSS_LMEM_RESET; 1237 pss_ctl |= __PSS_LMEM_INIT_EN; 1238 1239 /* 1240 * i2c workaround 12.5khz clock 1241 */ 1242 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1243 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1244 1245 /** 1246 * wait for memory initialization to be complete 1247 */ 1248 i = 0; 1249 do { 1250 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1251 i++; 1252 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1253 1254 /** 1255 * If memory initialization is not successful, IOC timeout will catch 1256 * such failures. 1257 */ 1258 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); 1259 1260 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); 1261 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1262 } 1263 1264 static void 1265 bfa_ioc_lpu_start(struct bfa_ioc *ioc) 1266 { 1267 u32 pss_ctl; 1268 1269 /** 1270 * Take processor out of reset. 1271 */ 1272 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1273 pss_ctl &= ~__PSS_LPU0_RESET; 1274 1275 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1276 } 1277 1278 static void 1279 bfa_ioc_lpu_stop(struct bfa_ioc *ioc) 1280 { 1281 u32 pss_ctl; 1282 1283 /** 1284 * Put processors in reset. 1285 */ 1286 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1287 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); 1288 1289 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1290 } 1291 1292 /* Get driver and firmware versions. */ 1293 void 1294 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1295 { 1296 u32 pgnum; 1297 u32 loff = 0; 1298 int i; 1299 u32 *fwsig = (u32 *) fwhdr; 1300 1301 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1302 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1303 1304 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); 1305 i++) { 1306 fwsig[i] = 1307 swab32(readl(loff + ioc->ioc_regs.smem_page_start)); 1308 loff += sizeof(u32); 1309 } 1310 } 1311 1312 static bool 1313 bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, 1314 struct bfi_ioc_image_hdr *fwhdr_2) 1315 { 1316 int i; 1317 1318 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 1319 if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) 1320 return false; 1321 } 1322 1323 return true; 1324 } 1325 1326 /* Returns TRUE if major minor and maintenance are same. 1327 * If patch version are same, check for MD5 Checksum to be same. 1328 */ 1329 static bool 1330 bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr, 1331 struct bfi_ioc_image_hdr *fwhdr_to_cmp) 1332 { 1333 if (drv_fwhdr->signature != fwhdr_to_cmp->signature) 1334 return false; 1335 if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) 1336 return false; 1337 if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) 1338 return false; 1339 if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) 1340 return false; 1341 if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && 1342 drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && 1343 drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) 1344 return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); 1345 1346 return true; 1347 } 1348 1349 static bool 1350 bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr) 1351 { 1352 if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) 1353 return false; 1354 1355 return true; 1356 } 1357 1358 static bool 1359 fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr) 1360 { 1361 if (fwhdr->fwver.phase == 0 && 1362 fwhdr->fwver.build == 0) 1363 return false; 1364 1365 return true; 1366 } 1367 1368 /* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */ 1369 static enum bfi_ioc_img_ver_cmp 1370 bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr, 1371 struct bfi_ioc_image_hdr *fwhdr_to_cmp) 1372 { 1373 if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp)) 1374 return BFI_IOC_IMG_VER_INCOMP; 1375 1376 if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) 1377 return BFI_IOC_IMG_VER_BETTER; 1378 else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) 1379 return BFI_IOC_IMG_VER_OLD; 1380 1381 /* GA takes priority over internal builds of the same patch stream. 1382 * At this point major minor maint and patch numbers are same. 1383 */ 1384 if (fwhdr_is_ga(base_fwhdr)) 1385 if (fwhdr_is_ga(fwhdr_to_cmp)) 1386 return BFI_IOC_IMG_VER_SAME; 1387 else 1388 return BFI_IOC_IMG_VER_OLD; 1389 else 1390 if (fwhdr_is_ga(fwhdr_to_cmp)) 1391 return BFI_IOC_IMG_VER_BETTER; 1392 1393 if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) 1394 return BFI_IOC_IMG_VER_BETTER; 1395 else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) 1396 return BFI_IOC_IMG_VER_OLD; 1397 1398 if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) 1399 return BFI_IOC_IMG_VER_BETTER; 1400 else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) 1401 return BFI_IOC_IMG_VER_OLD; 1402 1403 /* All Version Numbers are equal. 1404 * Md5 check to be done as a part of compatibility check. 1405 */ 1406 return BFI_IOC_IMG_VER_SAME; 1407 } 1408 1409 /* register definitions */ 1410 #define FLI_CMD_REG 0x0001d000 1411 #define FLI_WRDATA_REG 0x0001d00c 1412 #define FLI_RDDATA_REG 0x0001d010 1413 #define FLI_ADDR_REG 0x0001d004 1414 #define FLI_DEV_STATUS_REG 0x0001d014 1415 1416 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ 1417 #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ 1418 #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ 1419 #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ 1420 1421 #define NFC_STATE_RUNNING 0x20000001 1422 #define NFC_STATE_PAUSED 0x00004560 1423 #define NFC_VER_VALID 0x147 1424 1425 enum bfa_flash_cmd { 1426 BFA_FLASH_FAST_READ = 0x0b, /* fast read */ 1427 BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */ 1428 BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */ 1429 BFA_FLASH_WRITE = 0x02, /* write */ 1430 BFA_FLASH_READ_STATUS = 0x05, /* read status */ 1431 }; 1432 1433 /* hardware error definition */ 1434 enum bfa_flash_err { 1435 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ 1436 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ 1437 BFA_FLASH_BAD = -3, /*!< flash bad */ 1438 BFA_FLASH_BUSY = -4, /*!< flash busy */ 1439 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ 1440 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ 1441 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ 1442 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ 1443 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ 1444 }; 1445 1446 /* flash command register data structure */ 1447 union bfa_flash_cmd_reg { 1448 struct { 1449 #ifdef __BIG_ENDIAN 1450 u32 act:1; 1451 u32 rsv:1; 1452 u32 write_cnt:9; 1453 u32 read_cnt:9; 1454 u32 addr_cnt:4; 1455 u32 cmd:8; 1456 #else 1457 u32 cmd:8; 1458 u32 addr_cnt:4; 1459 u32 read_cnt:9; 1460 u32 write_cnt:9; 1461 u32 rsv:1; 1462 u32 act:1; 1463 #endif 1464 } r; 1465 u32 i; 1466 }; 1467 1468 /* flash device status register data structure */ 1469 union bfa_flash_dev_status_reg { 1470 struct { 1471 #ifdef __BIG_ENDIAN 1472 u32 rsv:21; 1473 u32 fifo_cnt:6; 1474 u32 busy:1; 1475 u32 init_status:1; 1476 u32 present:1; 1477 u32 bad:1; 1478 u32 good:1; 1479 #else 1480 u32 good:1; 1481 u32 bad:1; 1482 u32 present:1; 1483 u32 init_status:1; 1484 u32 busy:1; 1485 u32 fifo_cnt:6; 1486 u32 rsv:21; 1487 #endif 1488 } r; 1489 u32 i; 1490 }; 1491 1492 /* flash address register data structure */ 1493 union bfa_flash_addr_reg { 1494 struct { 1495 #ifdef __BIG_ENDIAN 1496 u32 addr:24; 1497 u32 dummy:8; 1498 #else 1499 u32 dummy:8; 1500 u32 addr:24; 1501 #endif 1502 } r; 1503 u32 i; 1504 }; 1505 1506 /* Flash raw private functions */ 1507 static void 1508 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, 1509 u8 rd_cnt, u8 ad_cnt, u8 op) 1510 { 1511 union bfa_flash_cmd_reg cmd; 1512 1513 cmd.i = 0; 1514 cmd.r.act = 1; 1515 cmd.r.write_cnt = wr_cnt; 1516 cmd.r.read_cnt = rd_cnt; 1517 cmd.r.addr_cnt = ad_cnt; 1518 cmd.r.cmd = op; 1519 writel(cmd.i, (pci_bar + FLI_CMD_REG)); 1520 } 1521 1522 static void 1523 bfa_flash_set_addr(void __iomem *pci_bar, u32 address) 1524 { 1525 union bfa_flash_addr_reg addr; 1526 1527 addr.r.addr = address & 0x00ffffff; 1528 addr.r.dummy = 0; 1529 writel(addr.i, (pci_bar + FLI_ADDR_REG)); 1530 } 1531 1532 static int 1533 bfa_flash_cmd_act_check(void __iomem *pci_bar) 1534 { 1535 union bfa_flash_cmd_reg cmd; 1536 1537 cmd.i = readl(pci_bar + FLI_CMD_REG); 1538 1539 if (cmd.r.act) 1540 return BFA_FLASH_ERR_CMD_ACT; 1541 1542 return 0; 1543 } 1544 1545 /* Flush FLI data fifo. */ 1546 static u32 1547 bfa_flash_fifo_flush(void __iomem *pci_bar) 1548 { 1549 u32 i; 1550 u32 t; 1551 union bfa_flash_dev_status_reg dev_status; 1552 1553 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); 1554 1555 if (!dev_status.r.fifo_cnt) 1556 return 0; 1557 1558 /* fifo counter in terms of words */ 1559 for (i = 0; i < dev_status.r.fifo_cnt; i++) 1560 t = readl(pci_bar + FLI_RDDATA_REG); 1561 1562 /* Check the device status. It may take some time. */ 1563 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { 1564 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); 1565 if (!dev_status.r.fifo_cnt) 1566 break; 1567 } 1568 1569 if (dev_status.r.fifo_cnt) 1570 return BFA_FLASH_ERR_FIFO_CNT; 1571 1572 return 0; 1573 } 1574 1575 /* Read flash status. */ 1576 static u32 1577 bfa_flash_status_read(void __iomem *pci_bar) 1578 { 1579 union bfa_flash_dev_status_reg dev_status; 1580 u32 status; 1581 u32 ret_status; 1582 int i; 1583 1584 status = bfa_flash_fifo_flush(pci_bar); 1585 if (status < 0) 1586 return status; 1587 1588 bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); 1589 1590 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { 1591 status = bfa_flash_cmd_act_check(pci_bar); 1592 if (!status) 1593 break; 1594 } 1595 1596 if (status) 1597 return status; 1598 1599 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); 1600 if (!dev_status.r.fifo_cnt) 1601 return BFA_FLASH_BUSY; 1602 1603 ret_status = readl(pci_bar + FLI_RDDATA_REG); 1604 ret_status >>= 24; 1605 1606 status = bfa_flash_fifo_flush(pci_bar); 1607 if (status < 0) 1608 return status; 1609 1610 return ret_status; 1611 } 1612 1613 /* Start flash read operation. */ 1614 static u32 1615 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, 1616 char *buf) 1617 { 1618 u32 status; 1619 1620 /* len must be mutiple of 4 and not exceeding fifo size */ 1621 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) 1622 return BFA_FLASH_ERR_LEN; 1623 1624 /* check status */ 1625 status = bfa_flash_status_read(pci_bar); 1626 if (status == BFA_FLASH_BUSY) 1627 status = bfa_flash_status_read(pci_bar); 1628 1629 if (status < 0) 1630 return status; 1631 1632 /* check if write-in-progress bit is cleared */ 1633 if (status & BFA_FLASH_WIP_MASK) 1634 return BFA_FLASH_ERR_WIP; 1635 1636 bfa_flash_set_addr(pci_bar, offset); 1637 1638 bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); 1639 1640 return 0; 1641 } 1642 1643 /* Check flash read operation. */ 1644 static u32 1645 bfa_flash_read_check(void __iomem *pci_bar) 1646 { 1647 if (bfa_flash_cmd_act_check(pci_bar)) 1648 return 1; 1649 1650 return 0; 1651 } 1652 1653 /* End flash read operation. */ 1654 static void 1655 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) 1656 { 1657 u32 i; 1658 1659 /* read data fifo up to 32 words */ 1660 for (i = 0; i < len; i += 4) { 1661 u32 w = readl(pci_bar + FLI_RDDATA_REG); 1662 *((u32 *)(buf + i)) = swab32(w); 1663 } 1664 1665 bfa_flash_fifo_flush(pci_bar); 1666 } 1667 1668 /* Perform flash raw read. */ 1669 1670 #define FLASH_BLOCKING_OP_MAX 500 1671 #define FLASH_SEM_LOCK_REG 0x18820 1672 1673 static int 1674 bfa_raw_sem_get(void __iomem *bar) 1675 { 1676 int locked; 1677 1678 locked = readl(bar + FLASH_SEM_LOCK_REG); 1679 1680 return !locked; 1681 } 1682 1683 static enum bfa_status 1684 bfa_flash_sem_get(void __iomem *bar) 1685 { 1686 u32 n = FLASH_BLOCKING_OP_MAX; 1687 1688 while (!bfa_raw_sem_get(bar)) { 1689 if (--n <= 0) 1690 return BFA_STATUS_BADFLASH; 1691 mdelay(10); 1692 } 1693 return BFA_STATUS_OK; 1694 } 1695 1696 static void 1697 bfa_flash_sem_put(void __iomem *bar) 1698 { 1699 writel(0, (bar + FLASH_SEM_LOCK_REG)); 1700 } 1701 1702 static enum bfa_status 1703 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, 1704 u32 len) 1705 { 1706 u32 n, status; 1707 u32 off, l, s, residue, fifo_sz; 1708 1709 residue = len; 1710 off = 0; 1711 fifo_sz = BFA_FLASH_FIFO_SIZE; 1712 status = bfa_flash_sem_get(pci_bar); 1713 if (status != BFA_STATUS_OK) 1714 return status; 1715 1716 while (residue) { 1717 s = offset + off; 1718 n = s / fifo_sz; 1719 l = (n + 1) * fifo_sz - s; 1720 if (l > residue) 1721 l = residue; 1722 1723 status = bfa_flash_read_start(pci_bar, offset + off, l, 1724 &buf[off]); 1725 if (status < 0) { 1726 bfa_flash_sem_put(pci_bar); 1727 return BFA_STATUS_FAILED; 1728 } 1729 1730 n = BFA_FLASH_BLOCKING_OP_MAX; 1731 while (bfa_flash_read_check(pci_bar)) { 1732 if (--n <= 0) { 1733 bfa_flash_sem_put(pci_bar); 1734 return BFA_STATUS_FAILED; 1735 } 1736 } 1737 1738 bfa_flash_read_end(pci_bar, l, &buf[off]); 1739 1740 residue -= l; 1741 off += l; 1742 } 1743 bfa_flash_sem_put(pci_bar); 1744 1745 return BFA_STATUS_OK; 1746 } 1747 1748 #define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ 1749 1750 static enum bfa_status 1751 bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off, 1752 u32 *fwimg) 1753 { 1754 return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, 1755 BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), 1756 (char *)fwimg, BFI_FLASH_CHUNK_SZ); 1757 } 1758 1759 static enum bfi_ioc_img_ver_cmp 1760 bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc, 1761 struct bfi_ioc_image_hdr *base_fwhdr) 1762 { 1763 struct bfi_ioc_image_hdr *flash_fwhdr; 1764 enum bfa_status status; 1765 u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; 1766 1767 status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg); 1768 if (status != BFA_STATUS_OK) 1769 return BFI_IOC_IMG_VER_INCOMP; 1770 1771 flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg; 1772 if (bfa_ioc_flash_fwver_valid(flash_fwhdr)) 1773 return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); 1774 else 1775 return BFI_IOC_IMG_VER_INCOMP; 1776 } 1777 1778 /** 1779 * Returns TRUE if driver is willing to work with current smem f/w version. 1780 */ 1781 bool 1782 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1783 { 1784 struct bfi_ioc_image_hdr *drv_fwhdr; 1785 enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp; 1786 1787 drv_fwhdr = (struct bfi_ioc_image_hdr *) 1788 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); 1789 1790 /* If smem is incompatible or old, driver should not work with it. */ 1791 drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr); 1792 if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || 1793 drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { 1794 return false; 1795 } 1796 1797 /* IF Flash has a better F/W than smem do not work with smem. 1798 * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. 1799 * If Flash is old or incomp work with smem iff smem f/w == drv f/w. 1800 */ 1801 smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr); 1802 1803 if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) 1804 return false; 1805 else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) 1806 return true; 1807 else 1808 return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? 1809 true : false; 1810 } 1811 1812 /* Return true if current running version is valid. Firmware signature and 1813 * execution context (driver/bios) must match. 1814 */ 1815 static bool 1816 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) 1817 { 1818 struct bfi_ioc_image_hdr fwhdr; 1819 1820 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1821 if (swab32(fwhdr.bootenv) != boot_env) 1822 return false; 1823 1824 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); 1825 } 1826 1827 /* Conditionally flush any pending message from firmware at start. */ 1828 static void 1829 bfa_ioc_msgflush(struct bfa_ioc *ioc) 1830 { 1831 u32 r32; 1832 1833 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); 1834 if (r32) 1835 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1836 } 1837 1838 static void 1839 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) 1840 { 1841 enum bfi_ioc_state ioc_fwstate; 1842 bool fwvalid; 1843 u32 boot_env; 1844 1845 ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); 1846 1847 if (force) 1848 ioc_fwstate = BFI_IOC_UNINIT; 1849 1850 boot_env = BFI_FWBOOT_ENV_OS; 1851 1852 /** 1853 * check if firmware is valid 1854 */ 1855 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1856 false : bfa_ioc_fwver_valid(ioc, boot_env); 1857 1858 if (!fwvalid) { 1859 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == 1860 BFA_STATUS_OK) 1861 bfa_ioc_poll_fwinit(ioc); 1862 1863 return; 1864 } 1865 1866 /** 1867 * If hardware initialization is in progress (initialized by other IOC), 1868 * just wait for an initialization completion interrupt. 1869 */ 1870 if (ioc_fwstate == BFI_IOC_INITING) { 1871 bfa_ioc_poll_fwinit(ioc); 1872 return; 1873 } 1874 1875 /** 1876 * If IOC function is disabled and firmware version is same, 1877 * just re-enable IOC. 1878 */ 1879 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { 1880 /** 1881 * When using MSI-X any pending firmware ready event should 1882 * be flushed. Otherwise MSI-X interrupts are not delivered. 1883 */ 1884 bfa_ioc_msgflush(ioc); 1885 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); 1886 return; 1887 } 1888 1889 /** 1890 * Initialize the h/w for any other states. 1891 */ 1892 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == 1893 BFA_STATUS_OK) 1894 bfa_ioc_poll_fwinit(ioc); 1895 } 1896 1897 void 1898 bfa_nw_ioc_timeout(struct bfa_ioc *ioc) 1899 { 1900 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); 1901 } 1902 1903 static void 1904 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len) 1905 { 1906 u32 *msgp = (u32 *) ioc_msg; 1907 u32 i; 1908 1909 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX)); 1910 1911 /* 1912 * first write msg to mailbox registers 1913 */ 1914 for (i = 0; i < len / sizeof(u32); i++) 1915 writel(cpu_to_le32(msgp[i]), 1916 ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); 1917 1918 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) 1919 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); 1920 1921 /* 1922 * write 1 to mailbox CMD to trigger LPU event 1923 */ 1924 writel(1, ioc->ioc_regs.hfn_mbox_cmd); 1925 (void) readl(ioc->ioc_regs.hfn_mbox_cmd); 1926 } 1927 1928 static void 1929 bfa_ioc_send_enable(struct bfa_ioc *ioc) 1930 { 1931 struct bfi_ioc_ctrl_req enable_req; 1932 struct timeval tv; 1933 1934 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1935 bfa_ioc_portid(ioc)); 1936 enable_req.clscode = htons(ioc->clscode); 1937 do_gettimeofday(&tv); 1938 enable_req.tv_sec = ntohl(tv.tv_sec); 1939 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); 1940 } 1941 1942 static void 1943 bfa_ioc_send_disable(struct bfa_ioc *ioc) 1944 { 1945 struct bfi_ioc_ctrl_req disable_req; 1946 1947 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, 1948 bfa_ioc_portid(ioc)); 1949 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); 1950 } 1951 1952 static void 1953 bfa_ioc_send_getattr(struct bfa_ioc *ioc) 1954 { 1955 struct bfi_ioc_getattr_req attr_req; 1956 1957 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, 1958 bfa_ioc_portid(ioc)); 1959 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); 1960 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); 1961 } 1962 1963 void 1964 bfa_nw_ioc_hb_check(struct bfa_ioc *ioc) 1965 { 1966 u32 hb_count; 1967 1968 hb_count = readl(ioc->ioc_regs.heartbeat); 1969 if (ioc->hb_count == hb_count) { 1970 bfa_ioc_recover(ioc); 1971 return; 1972 } else { 1973 ioc->hb_count = hb_count; 1974 } 1975 1976 bfa_ioc_mbox_poll(ioc); 1977 mod_timer(&ioc->hb_timer, jiffies + 1978 msecs_to_jiffies(BFA_IOC_HB_TOV)); 1979 } 1980 1981 static void 1982 bfa_ioc_hb_monitor(struct bfa_ioc *ioc) 1983 { 1984 ioc->hb_count = readl(ioc->ioc_regs.heartbeat); 1985 mod_timer(&ioc->hb_timer, jiffies + 1986 msecs_to_jiffies(BFA_IOC_HB_TOV)); 1987 } 1988 1989 static void 1990 bfa_ioc_hb_stop(struct bfa_ioc *ioc) 1991 { 1992 del_timer(&ioc->hb_timer); 1993 } 1994 1995 /* Initiate a full firmware download. */ 1996 static enum bfa_status 1997 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, 1998 u32 boot_env) 1999 { 2000 u32 *fwimg; 2001 u32 pgnum; 2002 u32 loff = 0; 2003 u32 chunkno = 0; 2004 u32 i; 2005 u32 asicmode; 2006 u32 fwimg_size; 2007 u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; 2008 enum bfa_status status; 2009 2010 if (boot_env == BFI_FWBOOT_ENV_OS && 2011 boot_type == BFI_FWBOOT_TYPE_FLASH) { 2012 fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); 2013 2014 status = bfa_nw_ioc_flash_img_get_chnk(ioc, 2015 BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); 2016 if (status != BFA_STATUS_OK) 2017 return status; 2018 2019 fwimg = fwimg_buf; 2020 } else { 2021 fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); 2022 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 2023 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 2024 } 2025 2026 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 2027 2028 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2029 2030 for (i = 0; i < fwimg_size; i++) { 2031 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 2032 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 2033 if (boot_env == BFI_FWBOOT_ENV_OS && 2034 boot_type == BFI_FWBOOT_TYPE_FLASH) { 2035 status = bfa_nw_ioc_flash_img_get_chnk(ioc, 2036 BFA_IOC_FLASH_CHUNK_ADDR(chunkno), 2037 fwimg_buf); 2038 if (status != BFA_STATUS_OK) 2039 return status; 2040 2041 fwimg = fwimg_buf; 2042 } else { 2043 fwimg = bfa_cb_image_get_chunk( 2044 bfa_ioc_asic_gen(ioc), 2045 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 2046 } 2047 } 2048 2049 /** 2050 * write smem 2051 */ 2052 writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]), 2053 ioc->ioc_regs.smem_page_start + loff); 2054 2055 loff += sizeof(u32); 2056 2057 /** 2058 * handle page offset wrap around 2059 */ 2060 loff = PSS_SMEM_PGOFF(loff); 2061 if (loff == 0) { 2062 pgnum++; 2063 writel(pgnum, 2064 ioc->ioc_regs.host_page_num_fn); 2065 } 2066 } 2067 2068 writel(bfa_ioc_smem_pgnum(ioc, 0), 2069 ioc->ioc_regs.host_page_num_fn); 2070 2071 /* 2072 * Set boot type, env and device mode at the end. 2073 */ 2074 if (boot_env == BFI_FWBOOT_ENV_OS && 2075 boot_type == BFI_FWBOOT_TYPE_FLASH) { 2076 boot_type = BFI_FWBOOT_TYPE_NORMAL; 2077 } 2078 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, 2079 ioc->port0_mode, ioc->port1_mode); 2080 writel(asicmode, ((ioc->ioc_regs.smem_page_start) 2081 + BFI_FWBOOT_DEVMODE_OFF)); 2082 writel(boot_type, ((ioc->ioc_regs.smem_page_start) 2083 + (BFI_FWBOOT_TYPE_OFF))); 2084 writel(boot_env, ((ioc->ioc_regs.smem_page_start) 2085 + (BFI_FWBOOT_ENV_OFF))); 2086 return BFA_STATUS_OK; 2087 } 2088 2089 static void 2090 bfa_ioc_reset(struct bfa_ioc *ioc, bool force) 2091 { 2092 bfa_ioc_hwinit(ioc, force); 2093 } 2094 2095 /* BFA ioc enable reply by firmware */ 2096 static void 2097 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, 2098 u8 cap_bm) 2099 { 2100 struct bfa_iocpf *iocpf = &ioc->iocpf; 2101 2102 ioc->port_mode = ioc->port_mode_cfg = port_mode; 2103 ioc->ad_cap_bm = cap_bm; 2104 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); 2105 } 2106 2107 /* Update BFA configuration from firmware configuration. */ 2108 static void 2109 bfa_ioc_getattr_reply(struct bfa_ioc *ioc) 2110 { 2111 struct bfi_ioc_attr *attr = ioc->attr; 2112 2113 attr->adapter_prop = ntohl(attr->adapter_prop); 2114 attr->card_type = ntohl(attr->card_type); 2115 attr->maxfrsize = ntohs(attr->maxfrsize); 2116 2117 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 2118 } 2119 2120 /* Attach time initialization of mbox logic. */ 2121 static void 2122 bfa_ioc_mbox_attach(struct bfa_ioc *ioc) 2123 { 2124 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2125 int mc; 2126 2127 INIT_LIST_HEAD(&mod->cmd_q); 2128 for (mc = 0; mc < BFI_MC_MAX; mc++) { 2129 mod->mbhdlr[mc].cbfn = NULL; 2130 mod->mbhdlr[mc].cbarg = ioc->bfa; 2131 } 2132 } 2133 2134 /* Mbox poll timer -- restarts any pending mailbox requests. */ 2135 static void 2136 bfa_ioc_mbox_poll(struct bfa_ioc *ioc) 2137 { 2138 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2139 struct bfa_mbox_cmd *cmd; 2140 bfa_mbox_cmd_cbfn_t cbfn; 2141 void *cbarg; 2142 u32 stat; 2143 2144 /** 2145 * If no command pending, do nothing 2146 */ 2147 if (list_empty(&mod->cmd_q)) 2148 return; 2149 2150 /** 2151 * If previous command is not yet fetched by firmware, do nothing 2152 */ 2153 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2154 if (stat) 2155 return; 2156 2157 /** 2158 * Enqueue command to firmware. 2159 */ 2160 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2161 list_del(&cmd->qe); 2162 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2163 2164 /** 2165 * Give a callback to the client, indicating that the command is sent 2166 */ 2167 if (cmd->cbfn) { 2168 cbfn = cmd->cbfn; 2169 cbarg = cmd->cbarg; 2170 cmd->cbfn = NULL; 2171 cbfn(cbarg); 2172 } 2173 } 2174 2175 /* Cleanup any pending requests. */ 2176 static void 2177 bfa_ioc_mbox_flush(struct bfa_ioc *ioc) 2178 { 2179 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2180 struct bfa_mbox_cmd *cmd; 2181 2182 while (!list_empty(&mod->cmd_q)) { 2183 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2184 list_del(&cmd->qe); 2185 } 2186 } 2187 2188 /** 2189 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap 2190 * 2191 * @ioc: memory for IOC 2192 * @tbuf: app memory to store data from smem 2193 * @soff: smem offset 2194 * @sz: size of smem in bytes 2195 */ 2196 static int 2197 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) 2198 { 2199 u32 pgnum, loff, r32; 2200 int i, len; 2201 u32 *buf = tbuf; 2202 2203 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); 2204 loff = PSS_SMEM_PGOFF(soff); 2205 2206 /* 2207 * Hold semaphore to serialize pll init and fwtrc. 2208 */ 2209 if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) 2210 return 1; 2211 2212 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2213 2214 len = sz/sizeof(u32); 2215 for (i = 0; i < len; i++) { 2216 r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start)); 2217 buf[i] = be32_to_cpu(r32); 2218 loff += sizeof(u32); 2219 2220 /** 2221 * handle page offset wrap around 2222 */ 2223 loff = PSS_SMEM_PGOFF(loff); 2224 if (loff == 0) { 2225 pgnum++; 2226 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2227 } 2228 } 2229 2230 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), 2231 ioc->ioc_regs.host_page_num_fn); 2232 2233 /* 2234 * release semaphore 2235 */ 2236 readl(ioc->ioc_regs.ioc_init_sem_reg); 2237 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 2238 return 0; 2239 } 2240 2241 /* Retrieve saved firmware trace from a prior IOC failure. */ 2242 int 2243 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) 2244 { 2245 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; 2246 int tlen, status = 0; 2247 2248 tlen = *trclen; 2249 if (tlen > BNA_DBG_FWTRC_LEN) 2250 tlen = BNA_DBG_FWTRC_LEN; 2251 2252 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); 2253 *trclen = tlen; 2254 return status; 2255 } 2256 2257 /* Save firmware trace if configured. */ 2258 static void 2259 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) 2260 { 2261 int tlen; 2262 2263 if (ioc->dbg_fwsave_once) { 2264 ioc->dbg_fwsave_once = false; 2265 if (ioc->dbg_fwsave_len) { 2266 tlen = ioc->dbg_fwsave_len; 2267 bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); 2268 } 2269 } 2270 } 2271 2272 /* Retrieve saved firmware trace from a prior IOC failure. */ 2273 int 2274 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) 2275 { 2276 int tlen; 2277 2278 if (ioc->dbg_fwsave_len == 0) 2279 return BFA_STATUS_ENOFSAVE; 2280 2281 tlen = *trclen; 2282 if (tlen > ioc->dbg_fwsave_len) 2283 tlen = ioc->dbg_fwsave_len; 2284 2285 memcpy(trcdata, ioc->dbg_fwsave, tlen); 2286 *trclen = tlen; 2287 return BFA_STATUS_OK; 2288 } 2289 2290 static void 2291 bfa_ioc_fail_notify(struct bfa_ioc *ioc) 2292 { 2293 /** 2294 * Notify driver and common modules registered for notification. 2295 */ 2296 ioc->cbfn->hbfail_cbfn(ioc->bfa); 2297 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); 2298 bfa_nw_ioc_debug_save_ftrc(ioc); 2299 } 2300 2301 /* IOCPF to IOC interface */ 2302 static void 2303 bfa_ioc_pf_enabled(struct bfa_ioc *ioc) 2304 { 2305 bfa_fsm_send_event(ioc, IOC_E_ENABLED); 2306 } 2307 2308 static void 2309 bfa_ioc_pf_disabled(struct bfa_ioc *ioc) 2310 { 2311 bfa_fsm_send_event(ioc, IOC_E_DISABLED); 2312 } 2313 2314 static void 2315 bfa_ioc_pf_failed(struct bfa_ioc *ioc) 2316 { 2317 bfa_fsm_send_event(ioc, IOC_E_PFFAILED); 2318 } 2319 2320 static void 2321 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc) 2322 { 2323 bfa_fsm_send_event(ioc, IOC_E_HWFAILED); 2324 } 2325 2326 static void 2327 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) 2328 { 2329 /** 2330 * Provide enable completion callback and AEN notification. 2331 */ 2332 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 2333 } 2334 2335 /* IOC public */ 2336 static enum bfa_status 2337 bfa_ioc_pll_init(struct bfa_ioc *ioc) 2338 { 2339 /* 2340 * Hold semaphore so that nobody can access the chip during init. 2341 */ 2342 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 2343 2344 bfa_ioc_pll_init_asic(ioc); 2345 2346 ioc->pllinit = true; 2347 2348 /* Initialize LMEM */ 2349 bfa_ioc_lmem_init(ioc); 2350 2351 /* 2352 * release semaphore. 2353 */ 2354 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 2355 2356 return BFA_STATUS_OK; 2357 } 2358 2359 /* Interface used by diag module to do firmware boot with memory test 2360 * as the entry vector. 2361 */ 2362 static enum bfa_status 2363 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, 2364 u32 boot_env) 2365 { 2366 struct bfi_ioc_image_hdr *drv_fwhdr; 2367 enum bfa_status status; 2368 bfa_ioc_stats(ioc, ioc_boots); 2369 2370 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 2371 return BFA_STATUS_FAILED; 2372 if (boot_env == BFI_FWBOOT_ENV_OS && 2373 boot_type == BFI_FWBOOT_TYPE_NORMAL) { 2374 drv_fwhdr = (struct bfi_ioc_image_hdr *) 2375 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); 2376 /* Work with Flash iff flash f/w is better than driver f/w. 2377 * Otherwise push drivers firmware. 2378 */ 2379 if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == 2380 BFI_IOC_IMG_VER_BETTER) 2381 boot_type = BFI_FWBOOT_TYPE_FLASH; 2382 } 2383 2384 /** 2385 * Initialize IOC state of all functions on a chip reset. 2386 */ 2387 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { 2388 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2389 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2390 } else { 2391 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); 2392 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); 2393 } 2394 2395 bfa_ioc_msgflush(ioc); 2396 status = bfa_ioc_download_fw(ioc, boot_type, boot_env); 2397 if (status == BFA_STATUS_OK) 2398 bfa_ioc_lpu_start(ioc); 2399 else 2400 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2401 2402 return status; 2403 } 2404 2405 /* Enable/disable IOC failure auto recovery. */ 2406 void 2407 bfa_nw_ioc_auto_recover(bool auto_recover) 2408 { 2409 bfa_nw_auto_recover = auto_recover; 2410 } 2411 2412 static bool 2413 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) 2414 { 2415 u32 *msgp = mbmsg; 2416 u32 r32; 2417 int i; 2418 2419 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); 2420 if ((r32 & 1) == 0) 2421 return false; 2422 2423 /** 2424 * read the MBOX msg 2425 */ 2426 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 2427 i++) { 2428 r32 = readl(ioc->ioc_regs.lpu_mbox + 2429 i * sizeof(u32)); 2430 msgp[i] = htonl(r32); 2431 } 2432 2433 /** 2434 * turn off mailbox interrupt by clearing mailbox status 2435 */ 2436 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 2437 readl(ioc->ioc_regs.lpu_mbox_cmd); 2438 2439 return true; 2440 } 2441 2442 static void 2443 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 2444 { 2445 union bfi_ioc_i2h_msg_u *msg; 2446 struct bfa_iocpf *iocpf = &ioc->iocpf; 2447 2448 msg = (union bfi_ioc_i2h_msg_u *) m; 2449 2450 bfa_ioc_stats(ioc, ioc_isrs); 2451 2452 switch (msg->mh.msg_id) { 2453 case BFI_IOC_I2H_HBEAT: 2454 break; 2455 2456 case BFI_IOC_I2H_ENABLE_REPLY: 2457 bfa_ioc_enable_reply(ioc, 2458 (enum bfa_mode)msg->fw_event.port_mode, 2459 msg->fw_event.cap_bm); 2460 break; 2461 2462 case BFI_IOC_I2H_DISABLE_REPLY: 2463 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); 2464 break; 2465 2466 case BFI_IOC_I2H_GETATTR_REPLY: 2467 bfa_ioc_getattr_reply(ioc); 2468 break; 2469 2470 default: 2471 BUG_ON(1); 2472 } 2473 } 2474 2475 /** 2476 * bfa_nw_ioc_attach - IOC attach time initialization and setup. 2477 * 2478 * @ioc: memory for IOC 2479 * @bfa: driver instance structure 2480 */ 2481 void 2482 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 2483 { 2484 ioc->bfa = bfa; 2485 ioc->cbfn = cbfn; 2486 ioc->fcmode = false; 2487 ioc->pllinit = false; 2488 ioc->dbg_fwsave_once = true; 2489 ioc->iocpf.ioc = ioc; 2490 2491 bfa_ioc_mbox_attach(ioc); 2492 INIT_LIST_HEAD(&ioc->notify_q); 2493 2494 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 2495 bfa_fsm_send_event(ioc, IOC_E_RESET); 2496 } 2497 2498 /* Driver detach time IOC cleanup. */ 2499 void 2500 bfa_nw_ioc_detach(struct bfa_ioc *ioc) 2501 { 2502 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2503 2504 /* Done with detach, empty the notify_q. */ 2505 INIT_LIST_HEAD(&ioc->notify_q); 2506 } 2507 2508 /** 2509 * bfa_nw_ioc_pci_init - Setup IOC PCI properties. 2510 * 2511 * @pcidev: PCI device information for this IOC 2512 */ 2513 void 2514 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 2515 enum bfi_pcifn_class clscode) 2516 { 2517 ioc->clscode = clscode; 2518 ioc->pcidev = *pcidev; 2519 2520 /** 2521 * Initialize IOC and device personality 2522 */ 2523 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; 2524 ioc->asic_mode = BFI_ASIC_MODE_FC; 2525 2526 switch (pcidev->device_id) { 2527 case PCI_DEVICE_ID_BROCADE_CT: 2528 ioc->asic_gen = BFI_ASIC_GEN_CT; 2529 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2530 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2531 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; 2532 ioc->ad_cap_bm = BFA_CM_CNA; 2533 break; 2534 2535 case BFA_PCI_DEVICE_ID_CT2: 2536 ioc->asic_gen = BFI_ASIC_GEN_CT2; 2537 if (clscode == BFI_PCIFN_CLASS_FC && 2538 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { 2539 ioc->asic_mode = BFI_ASIC_MODE_FC16; 2540 ioc->fcmode = true; 2541 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; 2542 ioc->ad_cap_bm = BFA_CM_HBA; 2543 } else { 2544 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2545 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2546 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { 2547 ioc->port_mode = 2548 ioc->port_mode_cfg = BFA_MODE_CNA; 2549 ioc->ad_cap_bm = BFA_CM_CNA; 2550 } else { 2551 ioc->port_mode = 2552 ioc->port_mode_cfg = BFA_MODE_NIC; 2553 ioc->ad_cap_bm = BFA_CM_NIC; 2554 } 2555 } 2556 break; 2557 2558 default: 2559 BUG_ON(1); 2560 } 2561 2562 /** 2563 * Set asic specific interfaces. 2564 */ 2565 if (ioc->asic_gen == BFI_ASIC_GEN_CT) 2566 bfa_nw_ioc_set_ct_hwif(ioc); 2567 else { 2568 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); 2569 bfa_nw_ioc_set_ct2_hwif(ioc); 2570 bfa_nw_ioc_ct2_poweron(ioc); 2571 } 2572 2573 bfa_ioc_map_port(ioc); 2574 bfa_ioc_reg_init(ioc); 2575 } 2576 2577 /** 2578 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory 2579 * 2580 * @dm_kva: kernel virtual address of IOC dma memory 2581 * @dm_pa: physical address of IOC dma memory 2582 */ 2583 void 2584 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2585 { 2586 /** 2587 * dma memory for firmware attribute 2588 */ 2589 ioc->attr_dma.kva = dm_kva; 2590 ioc->attr_dma.pa = dm_pa; 2591 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2592 } 2593 2594 /* Return size of dma memory required. */ 2595 u32 2596 bfa_nw_ioc_meminfo(void) 2597 { 2598 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 2599 } 2600 2601 void 2602 bfa_nw_ioc_enable(struct bfa_ioc *ioc) 2603 { 2604 bfa_ioc_stats(ioc, ioc_enables); 2605 ioc->dbg_fwsave_once = true; 2606 2607 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 2608 } 2609 2610 void 2611 bfa_nw_ioc_disable(struct bfa_ioc *ioc) 2612 { 2613 bfa_ioc_stats(ioc, ioc_disables); 2614 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2615 } 2616 2617 /* Initialize memory for saving firmware trace. */ 2618 void 2619 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2620 { 2621 ioc->dbg_fwsave = dbg_fwsave; 2622 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0; 2623 } 2624 2625 static u32 2626 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) 2627 { 2628 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 2629 } 2630 2631 /* Register mailbox message handler function, to be called by common modules */ 2632 void 2633 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 2634 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2635 { 2636 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2637 2638 mod->mbhdlr[mc].cbfn = cbfn; 2639 mod->mbhdlr[mc].cbarg = cbarg; 2640 } 2641 2642 /** 2643 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. 2644 * 2645 * @ioc: IOC instance 2646 * @cmd: Mailbox command 2647 * 2648 * Waits if mailbox is busy. Responsibility of caller to serialize 2649 */ 2650 bool 2651 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, 2652 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) 2653 { 2654 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2655 u32 stat; 2656 2657 cmd->cbfn = cbfn; 2658 cmd->cbarg = cbarg; 2659 2660 /** 2661 * If a previous command is pending, queue new command 2662 */ 2663 if (!list_empty(&mod->cmd_q)) { 2664 list_add_tail(&cmd->qe, &mod->cmd_q); 2665 return true; 2666 } 2667 2668 /** 2669 * If mailbox is busy, queue command for poll timer 2670 */ 2671 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2672 if (stat) { 2673 list_add_tail(&cmd->qe, &mod->cmd_q); 2674 return true; 2675 } 2676 2677 /** 2678 * mailbox is free -- queue command to firmware 2679 */ 2680 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2681 2682 return false; 2683 } 2684 2685 /* Handle mailbox interrupts */ 2686 void 2687 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 2688 { 2689 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2690 struct bfi_mbmsg m; 2691 int mc; 2692 2693 if (bfa_ioc_msgget(ioc, &m)) { 2694 /** 2695 * Treat IOC message class as special. 2696 */ 2697 mc = m.mh.msg_class; 2698 if (mc == BFI_MC_IOC) { 2699 bfa_ioc_isr(ioc, &m); 2700 return; 2701 } 2702 2703 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 2704 return; 2705 2706 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 2707 } 2708 2709 bfa_ioc_lpu_read_stat(ioc); 2710 2711 /** 2712 * Try to send pending mailbox commands 2713 */ 2714 bfa_ioc_mbox_poll(ioc); 2715 } 2716 2717 void 2718 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) 2719 { 2720 bfa_ioc_stats(ioc, ioc_hbfails); 2721 bfa_ioc_stats_hb_count(ioc, ioc->hb_count); 2722 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2723 } 2724 2725 /* return true if IOC is disabled */ 2726 bool 2727 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) 2728 { 2729 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || 2730 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2731 } 2732 2733 /* return true if IOC is operational */ 2734 bool 2735 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 2736 { 2737 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 2738 } 2739 2740 /* Add to IOC heartbeat failure notification queue. To be used by common 2741 * modules such as cee, port, diag. 2742 */ 2743 void 2744 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, 2745 struct bfa_ioc_notify *notify) 2746 { 2747 list_add_tail(¬ify->qe, &ioc->notify_q); 2748 } 2749 2750 #define BFA_MFG_NAME "QLogic" 2751 static void 2752 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 2753 struct bfa_adapter_attr *ad_attr) 2754 { 2755 struct bfi_ioc_attr *ioc_attr; 2756 2757 ioc_attr = ioc->attr; 2758 2759 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 2760 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2761 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2762 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2763 memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2764 sizeof(struct bfa_mfg_vpd)); 2765 2766 ad_attr->nports = bfa_ioc_get_nports(ioc); 2767 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 2768 2769 bfa_ioc_get_adapter_model(ioc, ad_attr->model); 2770 /* For now, model descr uses same model string */ 2771 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 2772 2773 ad_attr->card_type = ioc_attr->card_type; 2774 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); 2775 2776 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 2777 ad_attr->prototype = 1; 2778 else 2779 ad_attr->prototype = 0; 2780 2781 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2782 bfa_nw_ioc_get_mac(ioc, ad_attr->mac); 2783 2784 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2785 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2786 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 2787 ad_attr->asic_rev = ioc_attr->asic_rev; 2788 2789 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2790 } 2791 2792 static enum bfa_ioc_type 2793 bfa_ioc_get_type(struct bfa_ioc *ioc) 2794 { 2795 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) 2796 return BFA_IOC_TYPE_LL; 2797 2798 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); 2799 2800 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) 2801 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; 2802 } 2803 2804 static void 2805 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) 2806 { 2807 memcpy(serial_num, 2808 (void *)ioc->attr->brcd_serialnum, 2809 BFA_ADAPTER_SERIAL_NUM_LEN); 2810 } 2811 2812 static void 2813 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) 2814 { 2815 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2816 } 2817 2818 static void 2819 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) 2820 { 2821 BUG_ON(!(chip_rev)); 2822 2823 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2824 2825 chip_rev[0] = 'R'; 2826 chip_rev[1] = 'e'; 2827 chip_rev[2] = 'v'; 2828 chip_rev[3] = '-'; 2829 chip_rev[4] = ioc->attr->asic_rev; 2830 chip_rev[5] = '\0'; 2831 } 2832 2833 static void 2834 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) 2835 { 2836 memcpy(optrom_ver, ioc->attr->optrom_version, 2837 BFA_VERSION_LEN); 2838 } 2839 2840 static void 2841 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) 2842 { 2843 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2844 } 2845 2846 static void 2847 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) 2848 { 2849 struct bfi_ioc_attr *ioc_attr; 2850 2851 BUG_ON(!(model)); 2852 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2853 2854 ioc_attr = ioc->attr; 2855 2856 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2857 BFA_MFG_NAME, ioc_attr->card_type); 2858 } 2859 2860 static enum bfa_ioc_state 2861 bfa_ioc_get_state(struct bfa_ioc *ioc) 2862 { 2863 enum bfa_iocpf_state iocpf_st; 2864 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2865 2866 if (ioc_st == BFA_IOC_ENABLING || 2867 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { 2868 2869 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); 2870 2871 switch (iocpf_st) { 2872 case BFA_IOCPF_SEMWAIT: 2873 ioc_st = BFA_IOC_SEMWAIT; 2874 break; 2875 2876 case BFA_IOCPF_HWINIT: 2877 ioc_st = BFA_IOC_HWINIT; 2878 break; 2879 2880 case BFA_IOCPF_FWMISMATCH: 2881 ioc_st = BFA_IOC_FWMISMATCH; 2882 break; 2883 2884 case BFA_IOCPF_FAIL: 2885 ioc_st = BFA_IOC_FAIL; 2886 break; 2887 2888 case BFA_IOCPF_INITFAIL: 2889 ioc_st = BFA_IOC_INITFAIL; 2890 break; 2891 2892 default: 2893 break; 2894 } 2895 } 2896 return ioc_st; 2897 } 2898 2899 void 2900 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) 2901 { 2902 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); 2903 2904 ioc_attr->state = bfa_ioc_get_state(ioc); 2905 ioc_attr->port_id = bfa_ioc_portid(ioc); 2906 ioc_attr->port_mode = ioc->port_mode; 2907 2908 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; 2909 ioc_attr->cap_bm = ioc->ad_cap_bm; 2910 2911 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); 2912 2913 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 2914 2915 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); 2916 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); 2917 ioc_attr->def_fn = bfa_ioc_is_default(ioc); 2918 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2919 } 2920 2921 /* WWN public */ 2922 static u64 2923 bfa_ioc_get_pwwn(struct bfa_ioc *ioc) 2924 { 2925 return ioc->attr->pwwn; 2926 } 2927 2928 void 2929 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac) 2930 { 2931 ether_addr_copy(mac, ioc->attr->mac); 2932 } 2933 2934 /* Firmware failure detected. Start recovery actions. */ 2935 static void 2936 bfa_ioc_recover(struct bfa_ioc *ioc) 2937 { 2938 pr_crit("Heart Beat of IOC has failed\n"); 2939 bfa_ioc_stats(ioc, ioc_hbfails); 2940 bfa_ioc_stats_hb_count(ioc, ioc->hb_count); 2941 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2942 } 2943 2944 /* BFA IOC PF private functions */ 2945 2946 static void 2947 bfa_iocpf_enable(struct bfa_ioc *ioc) 2948 { 2949 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); 2950 } 2951 2952 static void 2953 bfa_iocpf_disable(struct bfa_ioc *ioc) 2954 { 2955 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); 2956 } 2957 2958 static void 2959 bfa_iocpf_fail(struct bfa_ioc *ioc) 2960 { 2961 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); 2962 } 2963 2964 static void 2965 bfa_iocpf_initfail(struct bfa_ioc *ioc) 2966 { 2967 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); 2968 } 2969 2970 static void 2971 bfa_iocpf_getattrfail(struct bfa_ioc *ioc) 2972 { 2973 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); 2974 } 2975 2976 static void 2977 bfa_iocpf_stop(struct bfa_ioc *ioc) 2978 { 2979 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); 2980 } 2981 2982 void 2983 bfa_nw_iocpf_timeout(struct bfa_ioc *ioc) 2984 { 2985 enum bfa_iocpf_state iocpf_st; 2986 2987 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); 2988 2989 if (iocpf_st == BFA_IOCPF_HWINIT) 2990 bfa_ioc_poll_fwinit(ioc); 2991 else 2992 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2993 } 2994 2995 void 2996 bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc) 2997 { 2998 bfa_ioc_hw_sem_get(ioc); 2999 } 3000 3001 static void 3002 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) 3003 { 3004 u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); 3005 3006 if (fwstate == BFI_IOC_DISABLED) { 3007 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); 3008 return; 3009 } 3010 3011 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { 3012 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 3013 } else { 3014 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 3015 mod_timer(&ioc->iocpf_timer, jiffies + 3016 msecs_to_jiffies(BFA_IOC_POLL_TOV)); 3017 } 3018 } 3019 3020 /* 3021 * Flash module specific 3022 */ 3023 3024 /* 3025 * FLASH DMA buffer should be big enough to hold both MFG block and 3026 * asic block(64k) at the same time and also should be 2k aligned to 3027 * avoid write segement to cross sector boundary. 3028 */ 3029 #define BFA_FLASH_SEG_SZ 2048 3030 #define BFA_FLASH_DMA_BUF_SZ \ 3031 roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ) 3032 3033 static void 3034 bfa_flash_cb(struct bfa_flash *flash) 3035 { 3036 flash->op_busy = 0; 3037 if (flash->cbfn) 3038 flash->cbfn(flash->cbarg, flash->status); 3039 } 3040 3041 static void 3042 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) 3043 { 3044 struct bfa_flash *flash = cbarg; 3045 3046 switch (event) { 3047 case BFA_IOC_E_DISABLED: 3048 case BFA_IOC_E_FAILED: 3049 if (flash->op_busy) { 3050 flash->status = BFA_STATUS_IOC_FAILURE; 3051 flash->cbfn(flash->cbarg, flash->status); 3052 flash->op_busy = 0; 3053 } 3054 break; 3055 default: 3056 break; 3057 } 3058 } 3059 3060 /* 3061 * Send flash write request. 3062 */ 3063 static void 3064 bfa_flash_write_send(struct bfa_flash *flash) 3065 { 3066 struct bfi_flash_write_req *msg = 3067 (struct bfi_flash_write_req *) flash->mb.msg; 3068 u32 len; 3069 3070 msg->type = be32_to_cpu(flash->type); 3071 msg->instance = flash->instance; 3072 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); 3073 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? 3074 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3075 msg->length = be32_to_cpu(len); 3076 3077 /* indicate if it's the last msg of the whole write operation */ 3078 msg->last = (len == flash->residue) ? 1 : 0; 3079 3080 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, 3081 bfa_ioc_portid(flash->ioc)); 3082 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3083 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); 3084 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3085 3086 flash->residue -= len; 3087 flash->offset += len; 3088 } 3089 3090 /** 3091 * bfa_flash_read_send - Send flash read request. 3092 * 3093 * @cbarg: callback argument 3094 */ 3095 static void 3096 bfa_flash_read_send(void *cbarg) 3097 { 3098 struct bfa_flash *flash = cbarg; 3099 struct bfi_flash_read_req *msg = 3100 (struct bfi_flash_read_req *) flash->mb.msg; 3101 u32 len; 3102 3103 msg->type = be32_to_cpu(flash->type); 3104 msg->instance = flash->instance; 3105 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); 3106 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? 3107 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3108 msg->length = be32_to_cpu(len); 3109 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, 3110 bfa_ioc_portid(flash->ioc)); 3111 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3112 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3113 } 3114 3115 /** 3116 * bfa_flash_intr - Process flash response messages upon receiving interrupts. 3117 * 3118 * @flasharg: flash structure 3119 * @msg: message structure 3120 */ 3121 static void 3122 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 3123 { 3124 struct bfa_flash *flash = flasharg; 3125 u32 status; 3126 3127 union { 3128 struct bfi_flash_query_rsp *query; 3129 struct bfi_flash_write_rsp *write; 3130 struct bfi_flash_read_rsp *read; 3131 struct bfi_mbmsg *msg; 3132 } m; 3133 3134 m.msg = msg; 3135 3136 /* receiving response after ioc failure */ 3137 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) 3138 return; 3139 3140 switch (msg->mh.msg_id) { 3141 case BFI_FLASH_I2H_QUERY_RSP: 3142 status = be32_to_cpu(m.query->status); 3143 if (status == BFA_STATUS_OK) { 3144 u32 i; 3145 struct bfa_flash_attr *attr, *f; 3146 3147 attr = (struct bfa_flash_attr *) flash->ubuf; 3148 f = (struct bfa_flash_attr *) flash->dbuf_kva; 3149 attr->status = be32_to_cpu(f->status); 3150 attr->npart = be32_to_cpu(f->npart); 3151 for (i = 0; i < attr->npart; i++) { 3152 attr->part[i].part_type = 3153 be32_to_cpu(f->part[i].part_type); 3154 attr->part[i].part_instance = 3155 be32_to_cpu(f->part[i].part_instance); 3156 attr->part[i].part_off = 3157 be32_to_cpu(f->part[i].part_off); 3158 attr->part[i].part_size = 3159 be32_to_cpu(f->part[i].part_size); 3160 attr->part[i].part_len = 3161 be32_to_cpu(f->part[i].part_len); 3162 attr->part[i].part_status = 3163 be32_to_cpu(f->part[i].part_status); 3164 } 3165 } 3166 flash->status = status; 3167 bfa_flash_cb(flash); 3168 break; 3169 case BFI_FLASH_I2H_WRITE_RSP: 3170 status = be32_to_cpu(m.write->status); 3171 if (status != BFA_STATUS_OK || flash->residue == 0) { 3172 flash->status = status; 3173 bfa_flash_cb(flash); 3174 } else 3175 bfa_flash_write_send(flash); 3176 break; 3177 case BFI_FLASH_I2H_READ_RSP: 3178 status = be32_to_cpu(m.read->status); 3179 if (status != BFA_STATUS_OK) { 3180 flash->status = status; 3181 bfa_flash_cb(flash); 3182 } else { 3183 u32 len = be32_to_cpu(m.read->length); 3184 memcpy(flash->ubuf + flash->offset, 3185 flash->dbuf_kva, len); 3186 flash->residue -= len; 3187 flash->offset += len; 3188 if (flash->residue == 0) { 3189 flash->status = status; 3190 bfa_flash_cb(flash); 3191 } else 3192 bfa_flash_read_send(flash); 3193 } 3194 break; 3195 case BFI_FLASH_I2H_BOOT_VER_RSP: 3196 case BFI_FLASH_I2H_EVENT: 3197 break; 3198 default: 3199 WARN_ON(1); 3200 } 3201 } 3202 3203 /* 3204 * Flash memory info API. 3205 */ 3206 u32 3207 bfa_nw_flash_meminfo(void) 3208 { 3209 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3210 } 3211 3212 /** 3213 * bfa_nw_flash_attach - Flash attach API. 3214 * 3215 * @flash: flash structure 3216 * @ioc: ioc structure 3217 * @dev: device structure 3218 */ 3219 void 3220 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 3221 { 3222 flash->ioc = ioc; 3223 flash->cbfn = NULL; 3224 flash->cbarg = NULL; 3225 flash->op_busy = 0; 3226 3227 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); 3228 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); 3229 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 3230 } 3231 3232 /** 3233 * bfa_nw_flash_memclaim - Claim memory for flash 3234 * 3235 * @flash: flash structure 3236 * @dm_kva: pointer to virtual memory address 3237 * @dm_pa: physical memory address 3238 */ 3239 void 3240 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 3241 { 3242 flash->dbuf_kva = dm_kva; 3243 flash->dbuf_pa = dm_pa; 3244 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); 3245 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3246 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3247 } 3248 3249 /** 3250 * bfa_nw_flash_get_attr - Get flash attribute. 3251 * 3252 * @flash: flash structure 3253 * @attr: flash attribute structure 3254 * @cbfn: callback function 3255 * @cbarg: callback argument 3256 * 3257 * Return status. 3258 */ 3259 enum bfa_status 3260 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, 3261 bfa_cb_flash cbfn, void *cbarg) 3262 { 3263 struct bfi_flash_query_req *msg = 3264 (struct bfi_flash_query_req *) flash->mb.msg; 3265 3266 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3267 return BFA_STATUS_IOC_NON_OP; 3268 3269 if (flash->op_busy) 3270 return BFA_STATUS_DEVBUSY; 3271 3272 flash->op_busy = 1; 3273 flash->cbfn = cbfn; 3274 flash->cbarg = cbarg; 3275 flash->ubuf = (u8 *) attr; 3276 3277 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, 3278 bfa_ioc_portid(flash->ioc)); 3279 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); 3280 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3281 3282 return BFA_STATUS_OK; 3283 } 3284 3285 /** 3286 * bfa_nw_flash_update_part - Update flash partition. 3287 * 3288 * @flash: flash structure 3289 * @type: flash partition type 3290 * @instance: flash partition instance 3291 * @buf: update data buffer 3292 * @len: data buffer length 3293 * @offset: offset relative to the partition starting address 3294 * @cbfn: callback function 3295 * @cbarg: callback argument 3296 * 3297 * Return status. 3298 */ 3299 enum bfa_status 3300 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, 3301 void *buf, u32 len, u32 offset, 3302 bfa_cb_flash cbfn, void *cbarg) 3303 { 3304 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3305 return BFA_STATUS_IOC_NON_OP; 3306 3307 /* 3308 * 'len' must be in word (4-byte) boundary 3309 */ 3310 if (!len || (len & 0x03)) 3311 return BFA_STATUS_FLASH_BAD_LEN; 3312 3313 if (type == BFA_FLASH_PART_MFG) 3314 return BFA_STATUS_EINVAL; 3315 3316 if (flash->op_busy) 3317 return BFA_STATUS_DEVBUSY; 3318 3319 flash->op_busy = 1; 3320 flash->cbfn = cbfn; 3321 flash->cbarg = cbarg; 3322 flash->type = type; 3323 flash->instance = instance; 3324 flash->residue = len; 3325 flash->offset = 0; 3326 flash->addr_off = offset; 3327 flash->ubuf = buf; 3328 3329 bfa_flash_write_send(flash); 3330 3331 return BFA_STATUS_OK; 3332 } 3333 3334 /** 3335 * bfa_nw_flash_read_part - Read flash partition. 3336 * 3337 * @flash: flash structure 3338 * @type: flash partition type 3339 * @instance: flash partition instance 3340 * @buf: read data buffer 3341 * @len: data buffer length 3342 * @offset: offset relative to the partition starting address 3343 * @cbfn: callback function 3344 * @cbarg: callback argument 3345 * 3346 * Return status. 3347 */ 3348 enum bfa_status 3349 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, 3350 void *buf, u32 len, u32 offset, 3351 bfa_cb_flash cbfn, void *cbarg) 3352 { 3353 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3354 return BFA_STATUS_IOC_NON_OP; 3355 3356 /* 3357 * 'len' must be in word (4-byte) boundary 3358 */ 3359 if (!len || (len & 0x03)) 3360 return BFA_STATUS_FLASH_BAD_LEN; 3361 3362 if (flash->op_busy) 3363 return BFA_STATUS_DEVBUSY; 3364 3365 flash->op_busy = 1; 3366 flash->cbfn = cbfn; 3367 flash->cbarg = cbarg; 3368 flash->type = type; 3369 flash->instance = instance; 3370 flash->residue = len; 3371 flash->offset = 0; 3372 flash->addr_off = offset; 3373 flash->ubuf = buf; 3374 3375 bfa_flash_read_send(flash); 3376 3377 return BFA_STATUS_OK; 3378 } 3379