Merge tag 'nfs-for-3.11-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
[cascardo/linux.git] / drivers / net / ethernet / brocade / bna / bfa_ioc.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22
23 /* IOC local definitions */
24
25 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
26
/* Each wrapper below forwards to the ASIC-specific handler installed in
 * ioc->ioc_hwif, keeping the state machines hardware-agnostic.
 */
27 #define bfa_ioc_firmware_lock(__ioc)                    \
28                         ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
29 #define bfa_ioc_firmware_unlock(__ioc)                  \
30                         ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
31 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
32 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
33 #define bfa_ioc_notify_fail(__ioc)                      \
34                         ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
35 #define bfa_ioc_sync_start(__ioc)               \
36                         ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
37 #define bfa_ioc_sync_join(__ioc)                        \
38                         ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
39 #define bfa_ioc_sync_leave(__ioc)                       \
40                         ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
41 #define bfa_ioc_sync_ack(__ioc)                         \
42                         ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
43 #define bfa_ioc_sync_complete(__ioc)                    \
44                         ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
45
/* True while a mailbox command is queued locally or still pending in the
 * host-to-firmware mailbox command register.
 */
46 #define bfa_ioc_mbox_cmd_pending(__ioc)         \
47                         (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
48                         readl((__ioc)->ioc_regs.hfn_mbox_cmd))
49
/* Module-wide auto-recovery default; copied into each IOCPF in
 * bfa_iocpf_sm_reset_entry().
 */
50 static bool bfa_nw_auto_recover = true;
51
52 /*
53  * forward declarations
54  */
55 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
56 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
57 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
58 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
59 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
60 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
61 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
62 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
63 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
64 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
65 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
66 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
67 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
68 static void bfa_ioc_recover(struct bfa_ioc *ioc);
69 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
70 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
71 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
72 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
73 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
74 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
75 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79 static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
80                          u32 boot_param);
81 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
83                                                 char *serial_num);
84 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
85                                                 char *fw_ver);
86 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
87                                                 char *chip_rev);
88 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
89                                                 char *optrom_ver);
90 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
91                                                 char *manufacturer);
92 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
93 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
94
95 /* IOC state machine definitions/declarations */
/* Events driving the IOC state machine below. */
96 enum ioc_event {
97         IOC_E_RESET             = 1,    /*!< IOC reset request          */
98         IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
99         IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
100         IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
101         IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
102         IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
103         IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
104         IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
105         IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
106         IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
107         IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
108         IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
109 };
110
/* NOTE(review): bfa_fsm_state_decl presumably emits the *_entry()/handler
 * prototypes for each state -- defined in a header (likely bfa_ioc.h); verify.
 */
111 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
121
/* Maps each state handler to the externally reported BFA_IOC_* state. */
122 static struct bfa_sm_table ioc_sm_table[] = {
123         {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
124         {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
125         {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
126         {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
127         {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
128         {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
129         {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
130         {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
131         {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
132         {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
133 };
134
135 /*
136  * Forward declareations for iocpf state machine
137  */
138 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
139 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
140 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
141 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
142 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
143 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
144
145 /* IOCPF state machine events */
146 enum iocpf_event {
147         IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
148         IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
149         IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
150         IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
151         IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
152         IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
153         IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
154         IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
155         IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
156         IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
157         IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
158         IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
159 };
160
161 /* IOCPF states */
162 enum bfa_iocpf_state {
163         BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
164         BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
165         BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
166         BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
167         BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
168         BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
169         BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
170         BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
171         BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from drivers */
172 };
173
/* Entry/handler declarations for every IOCPF state (see bfa_fsm_state_decl). */
174 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
175 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
176 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
177 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
178 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
182                                                 enum iocpf_event);
183 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
184 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
188                                                 enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
190
/* Maps each IOCPF handler to the externally reported BFA_IOCPF_* state.
 * Note several handlers intentionally share one reported state (e.g. both
 * fwcheck and mismatch report BFA_IOCPF_FWMISMATCH).
 */
191 static struct bfa_sm_table iocpf_sm_table[] = {
192         {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
193         {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
194         {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
195         {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
196         {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
197         {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
198         {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
199         {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
200         {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
201         {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
202         {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
203         {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
204         {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
205         {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
206 };
207
208 /* IOC State Machine */
209
210 /* Beginning state. IOC uninit state. */
211 static void
212 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
213 {
214 }
215
216 /* IOC is in uninit state. */
217 static void
218 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
219 {
220         switch (event) {
221         case IOC_E_RESET:
222                 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
223                 break;
224
225         default:
226                 bfa_sm_fault(event);
227         }
228 }
229
230 /* Reset entry actions -- initialize state machine */
231 static void
232 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
233 {
234         bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
235 }
236
237 /* IOC is in reset state. */
238 static void
239 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
240 {
241         switch (event) {
242         case IOC_E_ENABLE:
243                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
244                 break;
245
246         case IOC_E_DISABLE:
247                 bfa_ioc_disable_comp(ioc);
248                 break;
249
250         case IOC_E_DETACH:
251                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
252                 break;
253
254         default:
255                 bfa_sm_fault(event);
256         }
257 }
258
259 static void
260 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
261 {
262         bfa_iocpf_enable(ioc);
263 }
264
265 /* Host IOC function is being enabled, awaiting response from firmware.
266  * Semaphore is acquired.
267  */
268 static void
269 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
270 {
271         switch (event) {
272         case IOC_E_ENABLED:
273                 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
274                 break;
275
276         case IOC_E_PFFAILED:
277                 /* !!! fall through !!! */
278         case IOC_E_HWERROR:
279                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
280                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
281                 if (event != IOC_E_PFFAILED)
282                         bfa_iocpf_initfail(ioc);
283                 break;
284
285         case IOC_E_HWFAILED:
286                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
287                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
288                 break;
289
290         case IOC_E_DISABLE:
291                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
292                 break;
293
294         case IOC_E_DETACH:
295                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
296                 bfa_iocpf_stop(ioc);
297                 break;
298
299         case IOC_E_ENABLE:
300                 break;
301
302         default:
303                 bfa_sm_fault(event);
304         }
305 }
306
307 /* Semaphore should be acquired for version check. */
308 static void
309 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
310 {
311         mod_timer(&ioc->ioc_timer, jiffies +
312                 msecs_to_jiffies(BFA_IOC_TOV));
313         bfa_ioc_send_getattr(ioc);
314 }
315
316 /* IOC configuration in progress. Timer is active. */
317 static void
318 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
319 {
320         switch (event) {
321         case IOC_E_FWRSP_GETATTR:
322                 del_timer(&ioc->ioc_timer);
323                 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
324                 break;
325
326         case IOC_E_PFFAILED:
327         case IOC_E_HWERROR:
328                 del_timer(&ioc->ioc_timer);
329                 /* fall through */
330         case IOC_E_TIMEOUT:
331                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
332                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
333                 if (event != IOC_E_PFFAILED)
334                         bfa_iocpf_getattrfail(ioc);
335                 break;
336
337         case IOC_E_DISABLE:
338                 del_timer(&ioc->ioc_timer);
339                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
340                 break;
341
342         case IOC_E_ENABLE:
343                 break;
344
345         default:
346                 bfa_sm_fault(event);
347         }
348 }
349
350 static void
351 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
352 {
353         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
354         bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
355         bfa_ioc_hb_monitor(ioc);
356 }
357
358 static void
359 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
360 {
361         switch (event) {
362         case IOC_E_ENABLE:
363                 break;
364
365         case IOC_E_DISABLE:
366                 bfa_ioc_hb_stop(ioc);
367                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
368                 break;
369
370         case IOC_E_PFFAILED:
371         case IOC_E_HWERROR:
372                 bfa_ioc_hb_stop(ioc);
373                 /* !!! fall through !!! */
374         case IOC_E_HBFAIL:
375                 if (ioc->iocpf.auto_recover)
376                         bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
377                 else
378                         bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
379
380                 bfa_ioc_fail_notify(ioc);
381
382                 if (event != IOC_E_PFFAILED)
383                         bfa_iocpf_fail(ioc);
384                 break;
385
386         default:
387                 bfa_sm_fault(event);
388         }
389 }
390
391 static void
392 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
393 {
394         bfa_iocpf_disable(ioc);
395 }
396
397 /* IOC is being disabled */
398 static void
399 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
400 {
401         switch (event) {
402         case IOC_E_DISABLED:
403                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
404                 break;
405
406         case IOC_E_HWERROR:
407                 /*
408                  * No state change.  Will move to disabled state
409                  * after iocpf sm completes failure processing and
410                  * moves to disabled state.
411                  */
412                 bfa_iocpf_fail(ioc);
413                 break;
414
415         case IOC_E_HWFAILED:
416                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
417                 bfa_ioc_disable_comp(ioc);
418                 break;
419
420         default:
421                 bfa_sm_fault(event);
422         }
423 }
424
425 /* IOC disable completion entry. */
426 static void
427 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
428 {
429         bfa_ioc_disable_comp(ioc);
430 }
431
432 static void
433 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
434 {
435         switch (event) {
436         case IOC_E_ENABLE:
437                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
438                 break;
439
440         case IOC_E_DISABLE:
441                 ioc->cbfn->disable_cbfn(ioc->bfa);
442                 break;
443
444         case IOC_E_DETACH:
445                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
446                 bfa_iocpf_stop(ioc);
447                 break;
448
449         default:
450                 bfa_sm_fault(event);
451         }
452 }
453
454 static void
455 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
456 {
457 }
458
459 /* Hardware initialization retry. */
460 static void
461 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
462 {
463         switch (event) {
464         case IOC_E_ENABLED:
465                 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
466                 break;
467
468         case IOC_E_PFFAILED:
469         case IOC_E_HWERROR:
470                 /**
471                  * Initialization retry failed.
472                  */
473                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
474                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
475                 if (event != IOC_E_PFFAILED)
476                         bfa_iocpf_initfail(ioc);
477                 break;
478
479         case IOC_E_HWFAILED:
480                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
481                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
482                 break;
483
484         case IOC_E_ENABLE:
485                 break;
486
487         case IOC_E_DISABLE:
488                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
489                 break;
490
491         case IOC_E_DETACH:
492                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
493                 bfa_iocpf_stop(ioc);
494                 break;
495
496         default:
497                 bfa_sm_fault(event);
498         }
499 }
500
501 static void
502 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
503 {
504 }
505
506 /* IOC failure. */
507 static void
508 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
509 {
510         switch (event) {
511         case IOC_E_ENABLE:
512                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
513                 break;
514
515         case IOC_E_DISABLE:
516                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
517                 break;
518
519         case IOC_E_DETACH:
520                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
521                 bfa_iocpf_stop(ioc);
522                 break;
523
524         case IOC_E_HWERROR:
525                 /* HB failure notification, ignore. */
526                 break;
527
528         default:
529                 bfa_sm_fault(event);
530         }
531 }
532
533 static void
534 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
535 {
536 }
537
538 /* IOC failure. */
539 static void
540 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
541 {
542         switch (event) {
543
544         case IOC_E_ENABLE:
545                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
546                 break;
547
548         case IOC_E_DISABLE:
549                 ioc->cbfn->disable_cbfn(ioc->bfa);
550                 break;
551
552         case IOC_E_DETACH:
553                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
554                 break;
555
556         default:
557                 bfa_sm_fault(event);
558         }
559 }
560
561 /* IOCPF State Machine */
562
563 /* Reset entry actions -- initialize state machine */
564 static void
565 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
566 {
567         iocpf->fw_mismatch_notified = false;
568         iocpf->auto_recover = bfa_nw_auto_recover;
569 }
570
571 /* Beginning state. IOC is in reset state. */
572 static void
573 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
574 {
575         switch (event) {
576         case IOCPF_E_ENABLE:
577                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
578                 break;
579
580         case IOCPF_E_STOP:
581                 break;
582
583         default:
584                 bfa_sm_fault(event);
585         }
586 }
587
588 /* Semaphore should be acquired for version check. */
589 static void
590 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
591 {
592         bfa_ioc_hw_sem_init(iocpf->ioc);
593         bfa_ioc_hw_sem_get(iocpf->ioc);
594 }
595
596 /* Awaiting h/w semaphore to continue with version check. */
597 static void
598 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599 {
600         struct bfa_ioc *ioc = iocpf->ioc;
601
602         switch (event) {
603         case IOCPF_E_SEMLOCKED:
604                 if (bfa_ioc_firmware_lock(ioc)) {
605                         if (bfa_ioc_sync_start(ioc)) {
606                                 bfa_ioc_sync_join(ioc);
607                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
608                         } else {
609                                 bfa_ioc_firmware_unlock(ioc);
610                                 bfa_nw_ioc_hw_sem_release(ioc);
611                                 mod_timer(&ioc->sem_timer, jiffies +
612                                         msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
613                         }
614                 } else {
615                         bfa_nw_ioc_hw_sem_release(ioc);
616                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
617                 }
618                 break;
619
620         case IOCPF_E_SEM_ERROR:
621                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
622                 bfa_ioc_pf_hwfailed(ioc);
623                 break;
624
625         case IOCPF_E_DISABLE:
626                 bfa_ioc_hw_sem_get_cancel(ioc);
627                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
628                 bfa_ioc_pf_disabled(ioc);
629                 break;
630
631         case IOCPF_E_STOP:
632                 bfa_ioc_hw_sem_get_cancel(ioc);
633                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
634                 break;
635
636         default:
637                 bfa_sm_fault(event);
638         }
639 }
640
641 /* Notify enable completion callback */
642 static void
643 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
644 {
645         /* Call only the first time sm enters fwmismatch state. */
646         if (!iocpf->fw_mismatch_notified)
647                 bfa_ioc_pf_fwmismatch(iocpf->ioc);
648
649         iocpf->fw_mismatch_notified = true;
650         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
651                 msecs_to_jiffies(BFA_IOC_TOV));
652 }
653
654 /* Awaiting firmware version match. */
655 static void
656 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657 {
658         struct bfa_ioc *ioc = iocpf->ioc;
659
660         switch (event) {
661         case IOCPF_E_TIMEOUT:
662                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663                 break;
664
665         case IOCPF_E_DISABLE:
666                 del_timer(&ioc->iocpf_timer);
667                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668                 bfa_ioc_pf_disabled(ioc);
669                 break;
670
671         case IOCPF_E_STOP:
672                 del_timer(&ioc->iocpf_timer);
673                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674                 break;
675
676         default:
677                 bfa_sm_fault(event);
678         }
679 }
680
681 /* Request for semaphore. */
682 static void
683 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
684 {
685         bfa_ioc_hw_sem_get(iocpf->ioc);
686 }
687
688 /* Awaiting semaphore for h/w initialzation. */
689 static void
690 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
691 {
692         struct bfa_ioc *ioc = iocpf->ioc;
693
694         switch (event) {
695         case IOCPF_E_SEMLOCKED:
696                 if (bfa_ioc_sync_complete(ioc)) {
697                         bfa_ioc_sync_join(ioc);
698                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
699                 } else {
700                         bfa_nw_ioc_hw_sem_release(ioc);
701                         mod_timer(&ioc->sem_timer, jiffies +
702                                 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
703                 }
704                 break;
705
706         case IOCPF_E_SEM_ERROR:
707                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
708                 bfa_ioc_pf_hwfailed(ioc);
709                 break;
710
711         case IOCPF_E_DISABLE:
712                 bfa_ioc_hw_sem_get_cancel(ioc);
713                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
714                 break;
715
716         default:
717                 bfa_sm_fault(event);
718         }
719 }
720
721 static void
722 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
723 {
724         iocpf->poll_time = 0;
725         bfa_ioc_reset(iocpf->ioc, false);
726 }
727
728 /* Hardware is being initialized. Interrupts are enabled.
729  * Holding hardware semaphore lock.
730  */
731 static void
732 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
733 {
734         struct bfa_ioc *ioc = iocpf->ioc;
735
736         switch (event) {
737         case IOCPF_E_FWREADY:
738                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
739                 break;
740
741         case IOCPF_E_TIMEOUT:
742                 bfa_nw_ioc_hw_sem_release(ioc);
743                         bfa_ioc_pf_failed(ioc);
744                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
745                 break;
746
747         case IOCPF_E_DISABLE:
748                 del_timer(&ioc->iocpf_timer);
749                 bfa_ioc_sync_leave(ioc);
750                 bfa_nw_ioc_hw_sem_release(ioc);
751                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
752                 break;
753
754         default:
755                 bfa_sm_fault(event);
756         }
757 }
758
759 static void
760 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
761 {
762         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
763                 msecs_to_jiffies(BFA_IOC_TOV));
764         /**
765          * Enable Interrupts before sending fw IOC ENABLE cmd.
766          */
767         iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
768         bfa_ioc_send_enable(iocpf->ioc);
769 }
770
771 /* Host IOC function is being enabled, awaiting response from firmware.
772  * Semaphore is acquired.
773  */
774 static void
775 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
776 {
777         struct bfa_ioc *ioc = iocpf->ioc;
778
779         switch (event) {
780         case IOCPF_E_FWRSP_ENABLE:
781                 del_timer(&ioc->iocpf_timer);
782                 bfa_nw_ioc_hw_sem_release(ioc);
783                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
784                 break;
785
786         case IOCPF_E_INITFAIL:
787                 del_timer(&ioc->iocpf_timer);
788                 /*
789                  * !!! fall through !!!
790                  */
791         case IOCPF_E_TIMEOUT:
792                 bfa_nw_ioc_hw_sem_release(ioc);
793                 if (event == IOCPF_E_TIMEOUT)
794                         bfa_ioc_pf_failed(ioc);
795                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
796                 break;
797
798         case IOCPF_E_DISABLE:
799                 del_timer(&ioc->iocpf_timer);
800                 bfa_nw_ioc_hw_sem_release(ioc);
801                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
802                 break;
803
804         default:
805                 bfa_sm_fault(event);
806         }
807 }
808
809 static void
810 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
811 {
812         bfa_ioc_pf_enabled(iocpf->ioc);
813 }
814
815 static void
816 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
817 {
818         switch (event) {
819         case IOCPF_E_DISABLE:
820                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
821                 break;
822
823         case IOCPF_E_GETATTRFAIL:
824                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
825                 break;
826
827         case IOCPF_E_FAIL:
828                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
829                 break;
830
831         default:
832                 bfa_sm_fault(event);
833         }
834 }
835
836 static void
837 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
838 {
839         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
840                 msecs_to_jiffies(BFA_IOC_TOV));
841         bfa_ioc_send_disable(iocpf->ioc);
842 }
843
/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		/* Firmware acknowledged the disable request. */
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* No reply from firmware: force the fwstate to FAIL. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable reply; ignore while disabling. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
874
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	/* Acquire the h/w semaphore before updating the sync state. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
880
/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Leave the sync group, drop the semaphore, go disabled. */
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		/* Already tearing down; a failure needs no extra work. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
906
/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	/* Drop queued mailbox commands, then report disable completion. */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}
914
/* IOC is disabled; wait for a re-enable or a stop request. */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Driver unload: give up firmware ownership. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
934
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	/* Save the firmware trace (if configured) before tearing down. */
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
941
/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Publish the failure, leave sync, mark fwstate FAIL. */
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		/* Stop waiting for the semaphore before disabling. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already handling a failure; ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
980
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
	/* Terminal state until host disables/stops; nothing to do. */
}
985
/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		/* Driver unload: release firmware ownership. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1006
/* Entry: quiesce firmware and flush pending work before syncing. */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
1021
/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No auto recovery: park in the fail state. */
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			/* Re-init immediately if sync is complete
			 * (presumably all functions have ack'ed — see the
			 * asic-specific ioc_hwif); else retry via semwait. */
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		/* Cancel the pending semaphore request and disable. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
1064
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
	/* Terminal failed state; nothing to do on entry. */
}
1069
/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		/* Only a disable request moves the PF out of fail. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1083
1084 /* BFA IOC private functions */
1085
1086 /* Notify common modules registered for notification. */
1087 static void
1088 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1089 {
1090         struct bfa_ioc_notify *notify;
1091         struct list_head                        *qe;
1092
1093         list_for_each(qe, &ioc->notify_q) {
1094                 notify = (struct bfa_ioc_notify *)qe;
1095                 notify->cbfn(notify->cbarg, event);
1096         }
1097 }
1098
/* Run the driver's disable callback and notify registered modules. */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
1105
1106 bool
1107 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1108 {
1109         u32 r32;
1110         int cnt = 0;
1111 #define BFA_SEM_SPINCNT 3000
1112
1113         r32 = readl(sem_reg);
1114
1115         while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1116                 cnt++;
1117                 udelay(2);
1118                 r32 = readl(sem_reg);
1119         }
1120
1121         if (!(r32 & 1))
1122                 return true;
1123
1124         return false;
1125 }
1126
/* Release a h/w semaphore by writing 1 to it. */
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	/* NOTE(review): read before write — presumably flushes/orders a
	 * pending access before release; confirm against semaphore spec. */
	readl(sem_reg);
	writel(1, sem_reg);
}
1133
1134 /* Clear fwver hdr */
1135 static void
1136 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1137 {
1138         u32 pgnum, pgoff, loff = 0;
1139         int i;
1140
1141         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1142         pgoff = PSS_SMEM_PGOFF(loff);
1143         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1144
1145         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1146                 writel(0, ioc->ioc_regs.smem_page_start + loff);
1147                 loff += sizeof(u32);
1148         }
1149 }
1150
1151
/*
 * One-time h/w semaphore initialization: if a previous instance left
 * non-NORMAL firmware state behind, scrub the fw header and reset the
 * IOC fwstates so a clean boot can proceed.
 */
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* Nothing to clean up if the IOC was never initialized. */
	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	/* Leave normally-booted firmware untouched. */
	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	/* Scrub the stale firmware header and force both fwstates back
	 * to UNINIT. */
	bfa_ioc_fwver_clear(ioc);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}
1191
/*
 * Attempt to claim the IOC h/w semaphore; if it is busy, arm the
 * semaphore timer to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		/* NOTE(review): all-ones read — presumably a dead/removed
		 * device; confirm against the register spec. */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Semaphore held elsewhere: retry after BFA_IOC_HWSEM_TOV ms. */
	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
1214
/* Release the IOC h/w semaphore (write 1 releases it). */
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
1220
/* Cancel a pending semaphore acquisition by stopping the retry timer. */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
1226
/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	/* Take LMEM out of reset and enable its initialization. */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	/* Clear the done flag and disable further LMEM initialization. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1263
/* Start the LPU processor by releasing it from reset. */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1277
/* Stop both LPU processors by holding them in reset. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1291
/* Get driver and firmware versions: copy the firmware image header
 * out of shared memory into @fwhdr, word by word.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32	pgnum;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	/* Point the host page register at the start of smem. */
	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}
1311
1312 /* Returns TRUE if same. */
1313 bool
1314 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1315 {
1316         struct bfi_ioc_image_hdr *drv_fwhdr;
1317         int i;
1318
1319         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1320                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1321
1322         for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1323                 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1324                         return false;
1325         }
1326
1327         return true;
1328 }
1329
/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* Signature and boot environment must match the driver image. */
	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	/* Finally the md5 sums must agree as well. */
	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
1350
/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32	r32;

	/* Non-zero mbox cmd means a stale f/w message is pending; ack it. */
	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
1361
/*
 * Decide how to bring the IOC up based on the current fwstate and on
 * whether valid firmware is already running, then boot or re-attach.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* A forced init ignores whatever state firmware reports. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
1417
/* IOC state-machine timer callback: post IOC_E_TIMEOUT. */
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
1425
/* Copy a message into the host-to-fw mailbox and ring the doorbell. */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		/* NOTE(review): writel() already converts to little endian;
		 * the extra cpu_to_le32() double-swaps on big-endian hosts
		 * — confirm intended mailbox byte order. */
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero-fill the remainder of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
1450
/* Send an IOC enable request to firmware, with class code and time. */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	/* ntohl/htonl perform the same 32-bit swap; tv_sec goes out in
	 * big-endian order. */
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1464
/* Send an IOC disable request to firmware. */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1474
/* Request the IOC attribute block from firmware. */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	/* Tell firmware where to DMA the attributes. */
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
1485
1486 void
1487 bfa_nw_ioc_hb_check(void *cbarg)
1488 {
1489         struct bfa_ioc *ioc = cbarg;
1490         u32     hb_count;
1491
1492         hb_count = readl(ioc->ioc_regs.heartbeat);
1493         if (ioc->hb_count == hb_count) {
1494                 bfa_ioc_recover(ioc);
1495                 return;
1496         } else {
1497                 ioc->hb_count = hb_count;
1498         }
1499
1500         bfa_ioc_mbox_poll(ioc);
1501         mod_timer(&ioc->hb_timer, jiffies +
1502                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1503 }
1504
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	/* Snapshot the heartbeat counter and start periodic checking. */
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
1512
/* Stop heartbeat monitoring. */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
1518
/* Initiate a full firmware download. */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		/* Fetch the next image chunk at each chunk boundary. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			      ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				      ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to smem page 0. */
	writel(bfa_ioc_smem_pgnum(ioc, 0),
		      ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	*/
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
}
1578
/* Reset the IOC: thin wrapper over h/w init; @force ignores fwstate. */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
1584
/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	/* Record the f/w-reported port mode and capability bitmap. */
	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
1596
/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	/* Attributes arrive in network byte order; convert in place. */
	attr->adapter_prop  = ntohl(attr->adapter_prop);
	attr->card_type     = ntohl(attr->card_type);
	attr->maxfrsize     = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
1609
1610 /* Attach time initialization of mbox logic. */
1611 static void
1612 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1613 {
1614         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1615         int     mc;
1616
1617         INIT_LIST_HEAD(&mod->cmd_q);
1618         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1619                 mod->mbhdlr[mc].cbfn = NULL;
1620                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1621         }
1622 }
1623
/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/**
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		/* Clear cbfn before invoking so it fires only once. */
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}
1663
/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	/* Dequeue (and drop) every queued command. */
	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
1674
/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc:     memory for IOC
 * @tbuf:    app memory to store data from smem
 * @soff:    smem offset
 * @sz:      size of smem in bytes
 *
 * Return: 0 on success, 1 if the init semaphore could not be taken.
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	*/
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		/* NOTE(review): swab32() followed by be32_to_cpu() cancels
		 * out on little-endian hosts — confirm byte order intent. */
		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to smem page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	return 0;
}
1727
1728 /* Retrieve saved firmware trace from a prior IOC failure. */
1729 int
1730 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1731 {
1732         u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1733         int tlen, status = 0;
1734
1735         tlen = *trclen;
1736         if (tlen > BNA_DBG_FWTRC_LEN)
1737                 tlen = BNA_DBG_FWTRC_LEN;
1738
1739         status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1740         *trclen = tlen;
1741         return status;
1742 }
1743
1744 /* Save firmware trace if configured. */
1745 static void
1746 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1747 {
1748         int tlen;
1749
1750         if (ioc->dbg_fwsave_once) {
1751                 ioc->dbg_fwsave_once = 0;
1752                 if (ioc->dbg_fwsave_len) {
1753                         tlen = ioc->dbg_fwsave_len;
1754                         bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1755                 }
1756         }
1757 }
1758
1759 /* Retrieve saved firmware trace from a prior IOC failure. */
1760 int
1761 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1762 {
1763         int tlen;
1764
1765         if (ioc->dbg_fwsave_len == 0)
1766                 return BFA_STATUS_ENOFSAVE;
1767
1768         tlen = *trclen;
1769         if (tlen > ioc->dbg_fwsave_len)
1770                 tlen = ioc->dbg_fwsave_len;
1771
1772         memcpy(trcdata, ioc->dbg_fwsave, tlen);
1773         *trclen = tlen;
1774         return BFA_STATUS_OK;
1775 }
1776
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	/* Capture the firmware trace while it is still available. */
	bfa_nw_ioc_debug_save_ftrc(ioc);
}
1787
1788 /* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	/* Forward IOCPF "enabled" completion to the IOC state machine. */
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
1794
static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	/* Forward IOCPF "disabled" completion to the IOC state machine. */
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
1800
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	/* Report a PF-level failure to the IOC state machine. */
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
1806
static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	/* Report an unrecoverable hardware failure to the IOC. */
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}
1812
/* Firmware version mismatch: fail the pending enable request. */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
1821
1822 /* IOC public */
/* Run ASIC-specific PLL and LMEM init under the init semaphore.
 * Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	/* NOTE(review): the bfa_nw_ioc_sem_get() result is ignored — if the
	 * spin count expires, init proceeds without the lock; confirm this
	 * is acceptable. */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 *  release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
1845
/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
		u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	/* Flush stale f/w messages, download the image, start the LPU. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
1873
/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	/* Module-wide policy flag; presumably sampled at IOC attach —
	 * confirm against the attach path. */
	bfa_nw_auto_recover = auto_recover;
}
1880
/*
 * Pick up a pending message from the LPU mailbox, if any.
 * Returns true and copies the message into @mbmsg (which must be large
 * enough for a union bfi_ioc_i2h_msg_u); returns false when the mailbox
 * is empty.
 */
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
        u32     *msgp = mbmsg;
        u32     r32;
        int             i;

        /* Bit 0 of the mailbox command register flags a pending message. */
        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if ((r32 & 1) == 0)
                return false;

        /**
         * read the MBOX msg
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
                r32 = readl(ioc->ioc_regs.lpu_mbox +
                                   i * sizeof(u32));
                /* NOTE(review): htonl() swaps on little-endian hosts;
                 * presumably the firmware stores the message big-endian —
                 * confirm against consumers such as bfa_flash_intr(). */
                msgp[i] = htonl(r32);
        }

        /**
         * turn off mailbox interrupt by clearing mailbox status
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);
        /* the read-back above presumably flushes the posted write */

        return true;
}
1910
/*
 * Dispatch an IOC-class (BFI_MC_IOC) mailbox message to the IOC/IOCPF
 * state machines.  Any unknown message id is a firmware/driver protocol
 * mismatch and triggers BUG_ON.
 */
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
        union bfi_ioc_i2h_msg_u *msg;
        struct bfa_iocpf *iocpf = &ioc->iocpf;

        msg = (union bfi_ioc_i2h_msg_u *) m;

        bfa_ioc_stats(ioc, ioc_isrs);

        switch (msg->mh.msg_id) {
        case BFI_IOC_I2H_HBEAT:
                /* heartbeat only proves the firmware is alive; no action */
                break;

        case BFI_IOC_I2H_ENABLE_REPLY:
                bfa_ioc_enable_reply(ioc,
                        (enum bfa_mode)msg->fw_event.port_mode,
                        msg->fw_event.cap_bm);
                break;

        case BFI_IOC_I2H_DISABLE_REPLY:
                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
                break;

        case BFI_IOC_I2H_GETATTR_REPLY:
                bfa_ioc_getattr_reply(ioc);
                break;

        default:
                BUG_ON(1);
        }
}
1943
1944 /**
1945  * bfa_nw_ioc_attach - IOC attach time initialization and setup.
1946  *
1947  * @ioc:        memory for IOC
1948  * @bfa:        driver instance structure
1949  */
1950 void
1951 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1952 {
1953         ioc->bfa        = bfa;
1954         ioc->cbfn       = cbfn;
1955         ioc->fcmode     = false;
1956         ioc->pllinit    = false;
1957         ioc->dbg_fwsave_once = true;
1958         ioc->iocpf.ioc  = ioc;
1959
1960         bfa_ioc_mbox_attach(ioc);
1961         INIT_LIST_HEAD(&ioc->notify_q);
1962
1963         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1964         bfa_fsm_send_event(ioc, IOC_E_RESET);
1965 }
1966
/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DETACH);

        /* Done with detach, empty the notify_q. */
        INIT_LIST_HEAD(&ioc->notify_q);
}
1976
1977 /**
1978  * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
1979  *
1980  * @pcidev:     PCI device information for this IOC
1981  */
1982 void
1983 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1984                  enum bfi_pcifn_class clscode)
1985 {
1986         ioc->clscode    = clscode;
1987         ioc->pcidev     = *pcidev;
1988
1989         /**
1990          * Initialize IOC and device personality
1991          */
1992         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
1993         ioc->asic_mode  = BFI_ASIC_MODE_FC;
1994
1995         switch (pcidev->device_id) {
1996         case PCI_DEVICE_ID_BROCADE_CT:
1997                 ioc->asic_gen = BFI_ASIC_GEN_CT;
1998                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
1999                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2000                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2001                 ioc->ad_cap_bm = BFA_CM_CNA;
2002                 break;
2003
2004         case BFA_PCI_DEVICE_ID_CT2:
2005                 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2006                 if (clscode == BFI_PCIFN_CLASS_FC &&
2007                         pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2008                         ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2009                         ioc->fcmode = true;
2010                         ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2011                         ioc->ad_cap_bm = BFA_CM_HBA;
2012                 } else {
2013                         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2014                         ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2015                         if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2016                                 ioc->port_mode =
2017                                 ioc->port_mode_cfg = BFA_MODE_CNA;
2018                                 ioc->ad_cap_bm = BFA_CM_CNA;
2019                         } else {
2020                                 ioc->port_mode =
2021                                 ioc->port_mode_cfg = BFA_MODE_NIC;
2022                                 ioc->ad_cap_bm = BFA_CM_NIC;
2023                         }
2024                 }
2025                 break;
2026
2027         default:
2028                 BUG_ON(1);
2029         }
2030
2031         /**
2032          * Set asic specific interfaces.
2033          */
2034         if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2035                 bfa_nw_ioc_set_ct_hwif(ioc);
2036         else {
2037                 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2038                 bfa_nw_ioc_set_ct2_hwif(ioc);
2039                 bfa_nw_ioc_ct2_poweron(ioc);
2040         }
2041
2042         bfa_ioc_map_port(ioc);
2043         bfa_ioc_reg_init(ioc);
2044 }
2045
2046 /**
2047  * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2048  *
2049  * @dm_kva:     kernel virtual address of IOC dma memory
2050  * @dm_pa:      physical address of IOC dma memory
2051  */
2052 void
2053 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
2054 {
2055         /**
2056          * dma memory for firmware attribute
2057          */
2058         ioc->attr_dma.kva = dm_kva;
2059         ioc->attr_dma.pa = dm_pa;
2060         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2061 }
2062
2063 /* Return size of dma memory required. */
2064 u32
2065 bfa_nw_ioc_meminfo(void)
2066 {
2067         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2068 }
2069
/* Kick the IOC state machine with an enable event. */
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
        bfa_ioc_stats(ioc, ioc_enables);
        /* allow one firmware-trace save on the next failure */
        ioc->dbg_fwsave_once = true;

        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
2078
/* Kick the IOC state machine with a disable event. */
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
        bfa_ioc_stats(ioc, ioc_disables);
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
2085
2086 /* Initialize memory for saving firmware trace. */
2087 void
2088 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2089 {
2090         ioc->dbg_fwsave = dbg_fwsave;
2091         ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2092 }
2093
2094 static u32
2095 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2096 {
2097         return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2098 }
2099
2100 /* Register mailbox message handler function, to be called by common modules */
2101 void
2102 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2103                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2104 {
2105         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2106
2107         mod->mbhdlr[mc].cbfn    = cbfn;
2108         mod->mbhdlr[mc].cbarg = cbarg;
2109 }
2110
2111 /**
2112  * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2113  *
2114  * @ioc:        IOC instance
2115  * @cmd:        Mailbox command
2116  *
2117  * Waits if mailbox is busy. Responsibility of caller to serialize
2118  */
2119 bool
2120 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2121                         bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2122 {
2123         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2124         u32                     stat;
2125
2126         cmd->cbfn = cbfn;
2127         cmd->cbarg = cbarg;
2128
2129         /**
2130          * If a previous command is pending, queue new command
2131          */
2132         if (!list_empty(&mod->cmd_q)) {
2133                 list_add_tail(&cmd->qe, &mod->cmd_q);
2134                 return true;
2135         }
2136
2137         /**
2138          * If mailbox is busy, queue command for poll timer
2139          */
2140         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2141         if (stat) {
2142                 list_add_tail(&cmd->qe, &mod->cmd_q);
2143                 return true;
2144         }
2145
2146         /**
2147          * mailbox is free -- queue command to firmware
2148          */
2149         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2150
2151         return false;
2152 }
2153
/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        struct bfi_mbmsg m;
        int                             mc;

        if (bfa_ioc_msgget(ioc, &m)) {
                /**
                 * Treat IOC message class as special.
                 */
                mc = m.mh.msg_class;
                if (mc == BFI_MC_IOC) {
                        bfa_ioc_isr(ioc, &m);
                        return;
                }

                /* drop messages with no registered handler */
                if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
                        return;

                mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
        }

        /* runs even when no message was pending */
        bfa_ioc_lpu_read_stat(ioc);

        /**
         * Try to send pending mailbox commands
         */
        bfa_ioc_mbox_poll(ioc);
}
2185
/* Hardware error interrupt: record stats and fail the IOC state machine. */
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
2193
2194 /* return true if IOC is disabled */
2195 bool
2196 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2197 {
2198         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2199                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2200 }
2201
2202 /* return true if IOC is operational */
2203 bool
2204 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2205 {
2206         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2207 }
2208
/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 * @notify must stay valid until removed; it is linked, not copied.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
                        struct bfa_ioc_notify *notify)
{
        list_add_tail(&notify->qe, &ioc->notify_q);
}
2218
#define BFA_MFG_NAME "Brocade"

/*
 * Fill @ad_attr from the firmware attribute block (ioc->attr) and
 * IOC helpers: identity strings, VPD, port count/speed, capability
 * flags and PCIe properties.
 */
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
                         struct bfa_adapter_attr *ad_attr)
{
        struct bfi_ioc_attr *ioc_attr;

        ioc_attr = ioc->attr;

        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
                      sizeof(struct bfa_mfg_vpd));

        ad_attr->nports = bfa_ioc_get_nports(ioc);
        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
        /* For now, model descr uses same model string */
        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

        ad_attr->card_type = ioc_attr->card_type;
        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
                ad_attr->prototype = 1;
        else
                ad_attr->prototype = 0;

        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
        ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);

        ad_attr->pcie_gen = ioc_attr->pcie_gen;
        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
        ad_attr->asic_rev = ioc_attr->asic_rev;

        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}
2260
2261 static enum bfa_ioc_type
2262 bfa_ioc_get_type(struct bfa_ioc *ioc)
2263 {
2264         if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2265                 return BFA_IOC_TYPE_LL;
2266
2267         BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2268
2269         return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2270                 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2271 }
2272
2273 static void
2274 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2275 {
2276         memcpy(serial_num,
2277                         (void *)ioc->attr->brcd_serialnum,
2278                         BFA_ADAPTER_SERIAL_NUM_LEN);
2279 }
2280
/* Copy the firmware version string out of the firmware attribute block. */
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
2286
2287 static void
2288 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2289 {
2290         BUG_ON(!(chip_rev));
2291
2292         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2293
2294         chip_rev[0] = 'R';
2295         chip_rev[1] = 'e';
2296         chip_rev[2] = 'v';
2297         chip_rev[3] = '-';
2298         chip_rev[4] = ioc->attr->asic_rev;
2299         chip_rev[5] = '\0';
2300 }
2301
/* Copy the option ROM version string out of the firmware attribute block. */
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
        memcpy(optrom_ver, ioc->attr->optrom_version,
                      BFA_VERSION_LEN);
}
2308
2309 static void
2310 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2311 {
2312         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2313 }
2314
2315 static void
2316 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2317 {
2318         struct bfi_ioc_attr *ioc_attr;
2319
2320         BUG_ON(!(model));
2321         memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2322
2323         ioc_attr = ioc->attr;
2324
2325         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2326                 BFA_MFG_NAME, ioc_attr->card_type);
2327 }
2328
2329 static enum bfa_ioc_state
2330 bfa_ioc_get_state(struct bfa_ioc *ioc)
2331 {
2332         enum bfa_iocpf_state iocpf_st;
2333         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2334
2335         if (ioc_st == BFA_IOC_ENABLING ||
2336                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2337
2338                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2339
2340                 switch (iocpf_st) {
2341                 case BFA_IOCPF_SEMWAIT:
2342                         ioc_st = BFA_IOC_SEMWAIT;
2343                         break;
2344
2345                 case BFA_IOCPF_HWINIT:
2346                         ioc_st = BFA_IOC_HWINIT;
2347                         break;
2348
2349                 case BFA_IOCPF_FWMISMATCH:
2350                         ioc_st = BFA_IOC_FWMISMATCH;
2351                         break;
2352
2353                 case BFA_IOCPF_FAIL:
2354                         ioc_st = BFA_IOC_FAIL;
2355                         break;
2356
2357                 case BFA_IOCPF_INITFAIL:
2358                         ioc_st = BFA_IOC_INITFAIL;
2359                         break;
2360
2361                 default:
2362                         break;
2363                 }
2364         }
2365         return ioc_st;
2366 }
2367
/* Fill @ioc_attr with the externally visible IOC attributes. */
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = bfa_ioc_portid(ioc);
        ioc_attr->port_mode = ioc->port_mode;

        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
        ioc_attr->cap_bm = ioc->ad_cap_bm;

        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
        ioc_attr->def_fn = bfa_ioc_is_default(ioc);
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
2389
2390 /* WWN public */
2391 static u64
2392 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2393 {
2394         return ioc->attr->pwwn;
2395 }
2396
2397 mac_t
2398 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2399 {
2400         return ioc->attr->mac;
2401 }
2402
/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
        pr_crit("Heart Beat of IOC has failed\n");
        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
2412
2413 /* BFA IOC PF private functions */
2414
2415 static void
2416 bfa_iocpf_enable(struct bfa_ioc *ioc)
2417 {
2418         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2419 }
2420
2421 static void
2422 bfa_iocpf_disable(struct bfa_ioc *ioc)
2423 {
2424         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2425 }
2426
2427 static void
2428 bfa_iocpf_fail(struct bfa_ioc *ioc)
2429 {
2430         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2431 }
2432
2433 static void
2434 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2435 {
2436         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2437 }
2438
2439 static void
2440 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2441 {
2442         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2443 }
2444
2445 static void
2446 bfa_iocpf_stop(struct bfa_ioc *ioc)
2447 {
2448         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2449 }
2450
2451 void
2452 bfa_nw_iocpf_timeout(void *ioc_arg)
2453 {
2454         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2455         enum bfa_iocpf_state iocpf_st;
2456
2457         iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2458
2459         if (iocpf_st == BFA_IOCPF_HWINIT)
2460                 bfa_ioc_poll_fwinit(ioc);
2461         else
2462                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2463 }
2464
/* Semaphore timer callback: retry the hardware semaphore acquisition. */
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
        bfa_ioc_hw_sem_get((struct bfa_ioc *)ioc_arg);
}
2472
/*
 * Poll the firmware state register while waiting for firmware init.
 * On BFI_IOC_DISABLED the firmware is ready for download; otherwise
 * re-arm the poll timer until BFA_IOC_TOV worth of polling has elapsed,
 * at which point the IOCPF timeout path takes over.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
        u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        if (fwstate == BFI_IOC_DISABLED) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
                bfa_nw_iocpf_timeout(ioc);
        } else {
                /* not timed out yet: account the interval and re-arm */
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                mod_timer(&ioc->iocpf_timer, jiffies +
                        msecs_to_jiffies(BFA_IOC_POLL_TOV));
        }
}
2491
2492 /*
2493  *      Flash module specific
2494  */
2495
2496 /*
2497  * FLASH DMA buffer should be big enough to hold both MFG block and
2498  * asic block(64k) at the same time and also should be 2k aligned to
 * avoid write segment to cross sector boundary.
2500  */
2501 #define BFA_FLASH_SEG_SZ        2048
2502 #define BFA_FLASH_DMA_BUF_SZ    \
2503         roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
2504
2505 static void
2506 bfa_flash_cb(struct bfa_flash *flash)
2507 {
2508         flash->op_busy = 0;
2509         if (flash->cbfn)
2510                 flash->cbfn(flash->cbarg, flash->status);
2511 }
2512
2513 static void
2514 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2515 {
2516         struct bfa_flash *flash = cbarg;
2517
2518         switch (event) {
2519         case BFA_IOC_E_DISABLED:
2520         case BFA_IOC_E_FAILED:
2521                 if (flash->op_busy) {
2522                         flash->status = BFA_STATUS_IOC_FAILURE;
2523                         flash->cbfn(flash->cbarg, flash->status);
2524                         flash->op_busy = 0;
2525                 }
2526                 break;
2527         default:
2528                 break;
2529         }
2530 }
2531
2532 /*
2533  * Send flash write request.
2534  */
2535 static void
2536 bfa_flash_write_send(struct bfa_flash *flash)
2537 {
2538         struct bfi_flash_write_req *msg =
2539                         (struct bfi_flash_write_req *) flash->mb.msg;
2540         u32     len;
2541
2542         msg->type = be32_to_cpu(flash->type);
2543         msg->instance = flash->instance;
2544         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2545         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2546                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2547         msg->length = be32_to_cpu(len);
2548
2549         /* indicate if it's the last msg of the whole write operation */
2550         msg->last = (len == flash->residue) ? 1 : 0;
2551
2552         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2553                     bfa_ioc_portid(flash->ioc));
2554         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2555         memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2556         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2557
2558         flash->residue -= len;
2559         flash->offset += len;
2560 }
2561
2562 /**
2563  * bfa_flash_read_send - Send flash read request.
2564  *
2565  * @cbarg: callback argument
2566  */
2567 static void
2568 bfa_flash_read_send(void *cbarg)
2569 {
2570         struct bfa_flash *flash = cbarg;
2571         struct bfi_flash_read_req *msg =
2572                         (struct bfi_flash_read_req *) flash->mb.msg;
2573         u32     len;
2574
2575         msg->type = be32_to_cpu(flash->type);
2576         msg->instance = flash->instance;
2577         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2578         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2579                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2580         msg->length = be32_to_cpu(len);
2581         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2582                     bfa_ioc_portid(flash->ioc));
2583         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2584         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2585 }
2586
2587 /**
2588  * bfa_flash_intr - Process flash response messages upon receiving interrupts.
2589  *
2590  * @flasharg: flash structure
2591  * @msg: message structure
2592  */
2593 static void
2594 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2595 {
2596         struct bfa_flash *flash = flasharg;
2597         u32     status;
2598
2599         union {
2600                 struct bfi_flash_query_rsp *query;
2601                 struct bfi_flash_write_rsp *write;
2602                 struct bfi_flash_read_rsp *read;
2603                 struct bfi_mbmsg   *msg;
2604         } m;
2605
2606         m.msg = msg;
2607
2608         /* receiving response after ioc failure */
2609         if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2610                 return;
2611
2612         switch (msg->mh.msg_id) {
2613         case BFI_FLASH_I2H_QUERY_RSP:
2614                 status = be32_to_cpu(m.query->status);
2615                 if (status == BFA_STATUS_OK) {
2616                         u32     i;
2617                         struct bfa_flash_attr *attr, *f;
2618
2619                         attr = (struct bfa_flash_attr *) flash->ubuf;
2620                         f = (struct bfa_flash_attr *) flash->dbuf_kva;
2621                         attr->status = be32_to_cpu(f->status);
2622                         attr->npart = be32_to_cpu(f->npart);
2623                         for (i = 0; i < attr->npart; i++) {
2624                                 attr->part[i].part_type =
2625                                         be32_to_cpu(f->part[i].part_type);
2626                                 attr->part[i].part_instance =
2627                                         be32_to_cpu(f->part[i].part_instance);
2628                                 attr->part[i].part_off =
2629                                         be32_to_cpu(f->part[i].part_off);
2630                                 attr->part[i].part_size =
2631                                         be32_to_cpu(f->part[i].part_size);
2632                                 attr->part[i].part_len =
2633                                         be32_to_cpu(f->part[i].part_len);
2634                                 attr->part[i].part_status =
2635                                         be32_to_cpu(f->part[i].part_status);
2636                         }
2637                 }
2638                 flash->status = status;
2639                 bfa_flash_cb(flash);
2640                 break;
2641         case BFI_FLASH_I2H_WRITE_RSP:
2642                 status = be32_to_cpu(m.write->status);
2643                 if (status != BFA_STATUS_OK || flash->residue == 0) {
2644                         flash->status = status;
2645                         bfa_flash_cb(flash);
2646                 } else
2647                         bfa_flash_write_send(flash);
2648                 break;
2649         case BFI_FLASH_I2H_READ_RSP:
2650                 status = be32_to_cpu(m.read->status);
2651                 if (status != BFA_STATUS_OK) {
2652                         flash->status = status;
2653                         bfa_flash_cb(flash);
2654                 } else {
2655                         u32 len = be32_to_cpu(m.read->length);
2656                         memcpy(flash->ubuf + flash->offset,
2657                                flash->dbuf_kva, len);
2658                         flash->residue -= len;
2659                         flash->offset += len;
2660                         if (flash->residue == 0) {
2661                                 flash->status = status;
2662                                 bfa_flash_cb(flash);
2663                         } else
2664                                 bfa_flash_read_send(flash);
2665                 }
2666                 break;
2667         case BFI_FLASH_I2H_BOOT_VER_RSP:
2668         case BFI_FLASH_I2H_EVENT:
2669                 break;
2670         default:
2671                 WARN_ON(1);
2672         }
2673 }
2674
2675 /*
2676  * Flash memory info API.
2677  */
2678 u32
2679 bfa_nw_flash_meminfo(void)
2680 {
2681         return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2682 }
2683
2684 /**
2685  * bfa_nw_flash_attach - Flash attach API.
2686  *
2687  * @flash: flash structure
2688  * @ioc: ioc structure
2689  * @dev: device structure
2690  */
2691 void
2692 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2693 {
2694         flash->ioc = ioc;
2695         flash->cbfn = NULL;
2696         flash->cbarg = NULL;
2697         flash->op_busy = 0;
2698
2699         bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2700         bfa_q_qe_init(&flash->ioc_notify);
2701         bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2702         list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2703 }
2704
2705 /**
2706  * bfa_nw_flash_memclaim - Claim memory for flash
2707  *
2708  * @flash: flash structure
2709  * @dm_kva: pointer to virtual memory address
2710  * @dm_pa: physical memory address
2711  */
2712 void
2713 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2714 {
2715         flash->dbuf_kva = dm_kva;
2716         flash->dbuf_pa = dm_pa;
2717         memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2718         dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2719         dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2720 }
2721
2722 /**
2723  * bfa_nw_flash_get_attr - Get flash attribute.
2724  *
2725  * @flash: flash structure
2726  * @attr: flash attribute structure
2727  * @cbfn: callback function
2728  * @cbarg: callback argument
2729  *
2730  * Return status.
2731  */
2732 enum bfa_status
2733 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2734                       bfa_cb_flash cbfn, void *cbarg)
2735 {
2736         struct bfi_flash_query_req *msg =
2737                         (struct bfi_flash_query_req *) flash->mb.msg;
2738
2739         if (!bfa_nw_ioc_is_operational(flash->ioc))
2740                 return BFA_STATUS_IOC_NON_OP;
2741
2742         if (flash->op_busy)
2743                 return BFA_STATUS_DEVBUSY;
2744
2745         flash->op_busy = 1;
2746         flash->cbfn = cbfn;
2747         flash->cbarg = cbarg;
2748         flash->ubuf = (u8 *) attr;
2749
2750         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2751                     bfa_ioc_portid(flash->ioc));
2752         bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2753         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2754
2755         return BFA_STATUS_OK;
2756 }
2757
2758 /**
2759  * bfa_nw_flash_update_part - Update flash partition.
2760  *
2761  * @flash: flash structure
2762  * @type: flash partition type
2763  * @instance: flash partition instance
2764  * @buf: update data buffer
2765  * @len: data buffer length
2766  * @offset: offset relative to the partition starting address
2767  * @cbfn: callback function
2768  * @cbarg: callback argument
2769  *
2770  * Return status.
2771  */
2772 enum bfa_status
2773 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2774                          void *buf, u32 len, u32 offset,
2775                          bfa_cb_flash cbfn, void *cbarg)
2776 {
2777         if (!bfa_nw_ioc_is_operational(flash->ioc))
2778                 return BFA_STATUS_IOC_NON_OP;
2779
2780         /*
2781          * 'len' must be in word (4-byte) boundary
2782          */
2783         if (!len || (len & 0x03))
2784                 return BFA_STATUS_FLASH_BAD_LEN;
2785
2786         if (type == BFA_FLASH_PART_MFG)
2787                 return BFA_STATUS_EINVAL;
2788
2789         if (flash->op_busy)
2790                 return BFA_STATUS_DEVBUSY;
2791
2792         flash->op_busy = 1;
2793         flash->cbfn = cbfn;
2794         flash->cbarg = cbarg;
2795         flash->type = type;
2796         flash->instance = instance;
2797         flash->residue = len;
2798         flash->offset = 0;
2799         flash->addr_off = offset;
2800         flash->ubuf = buf;
2801
2802         bfa_flash_write_send(flash);
2803
2804         return BFA_STATUS_OK;
2805 }
2806
2807 /**
2808  * bfa_nw_flash_read_part - Read flash partition.
2809  *
2810  * @flash: flash structure
2811  * @type: flash partition type
2812  * @instance: flash partition instance
2813  * @buf: read data buffer
2814  * @len: data buffer length
2815  * @offset: offset relative to the partition starting address
2816  * @cbfn: callback function
2817  * @cbarg: callback argument
2818  *
2819  * Return status.
2820  */
2821 enum bfa_status
2822 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2823                        void *buf, u32 len, u32 offset,
2824                        bfa_cb_flash cbfn, void *cbarg)
2825 {
2826         if (!bfa_nw_ioc_is_operational(flash->ioc))
2827                 return BFA_STATUS_IOC_NON_OP;
2828
2829         /*
2830          * 'len' must be in word (4-byte) boundary
2831          */
2832         if (!len || (len & 0x03))
2833                 return BFA_STATUS_FLASH_BAD_LEN;
2834
2835         if (flash->op_busy)
2836                 return BFA_STATUS_DEVBUSY;
2837
2838         flash->op_busy = 1;
2839         flash->cbfn = cbfn;
2840         flash->cbarg = cbarg;
2841         flash->type = type;
2842         flash->instance = instance;
2843         flash->residue = len;
2844         flash->offset = 0;
2845         flash->addr_off = offset;
2846         flash->ubuf = buf;
2847
2848         bfa_flash_read_send(flash);
2849
2850         return BFA_STATUS_OK;
2851 }