drivers/scsi/qla2xxx/qla_init.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22
23 /*
24 *  QLogic ISP2x00 Hardware Support Function Prototypes.
25 */
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
34 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35     uint16_t *);
36
37 static int qla2x00_restart_isp(scsi_qla_host_t *);
38
39 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
40 static int qla84xx_init_chip(scsi_qla_host_t *);
41 static int qla25xx_init_queues(struct qla_hw_data *);
42
43 /* SRB Extensions ---------------------------------------------------------- */
44
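/*
 * SRB timer expiry handler: with the hardware lock held, clear the
 * command's slot in the base request queue, invoke the IOCB-specific
 * timeout callback, then release the SRB.
 */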
45 void
46 qla2x00_sp_timeout(unsigned long __data)
47 {
48         srb_t *sp = (srb_t *)__data;
49         struct srb_iocb *iocb;
50         fc_port_t *fcport = sp->fcport;
51         struct qla_hw_data *ha = fcport->vha->hw;
52         struct req_que *req;
53         unsigned long flags;
54
55         spin_lock_irqsave(&ha->hardware_lock, flags);
56         req = ha->req_q_map[0];
57         req->outstanding_cmds[sp->handle] = NULL;
58         iocb = &sp->u.iocb_cmd;
59         iocb->timeout(sp);
60         sp->free(fcport->vha, sp);
61         spin_unlock_irqrestore(&ha->hardware_lock, flags);
62 }
63
64 void
65 qla2x00_sp_free(void *data, void *ptr)
66 {
67         srb_t *sp = (srb_t *)ptr;
68         struct srb_iocb *iocb = &sp->u.iocb_cmd;
69         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
70
71         del_timer(&iocb->timer);
72         qla2x00_rel_sp(vha, sp);
73 }
74
75 /* Asynchronous Login/Logout Routines -------------------------------------- */
76
77 unsigned long
78 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
79 {
80         unsigned long tmo;
81         struct qla_hw_data *ha = vha->hw;
82
83         /* Firmware should use switch negotiated r_a_tov for timeout. */
84         tmo = ha->r_a_tov / 10 * 2;
85         if (IS_QLAFX00(ha)) {
86                 tmo = FX00_DEF_RATOV * 2;
87         } else if (!IS_FWI2_CAPABLE(ha)) {
88                 /*
89                  * Except for earlier ISPs where the timeout is seeded from the
90                  * initialization control block.
91                  */
92                 tmo = ha->login_timeout;
93         }
94         return tmo;
95 }
96
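/*
 * Timeout handler for asynchronous login/logout IOCBs: clear
 * FCF_ASYNC_SENT on the port; a timed-out login posts a logout and
 * completes the login work with MBS_COMMAND_ERROR (flagging a retry if
 * one was requested), while a timed-out logout is reported to the
 * target code as QLA_FUNCTION_TIMEOUT.
 */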
97 static void
98 qla2x00_async_iocb_timeout(void *data)
99 {
100         srb_t *sp = (srb_t *)data;
101         fc_port_t *fcport = sp->fcport;
102
103         ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
104             "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
105             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
106             fcport->d_id.b.al_pa);
107
108         fcport->flags &= ~FCF_ASYNC_SENT;
109         if (sp->type == SRB_LOGIN_CMD) {
110                 struct srb_iocb *lio = &sp->u.iocb_cmd;
111                 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
112                 /* Retry as needed. */
113                 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
114                 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
115                         QLA_LOGIO_LOGIN_RETRIED : 0;
116                 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117                         lio->u.logio.data);
118         } else if (sp->type == SRB_LOGOUT_CMD) {
119                 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
120         }
121 }
122
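/*
 * Login SRB completion: unless the driver is unloading, queue the
 * login-done work with the returned mailbox data, then free the SRB.
 */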
123 static void
124 qla2x00_async_login_sp_done(void *data, void *ptr, int res)
125 {
126         srb_t *sp = (srb_t *)ptr;
127         struct srb_iocb *lio = &sp->u.iocb_cmd;
128         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
129
130         if (!test_bit(UNLOADING, &vha->dpc_flags))
131                 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
132                     lio->u.logio.data);
133         sp->free(sp->fcport->vha, sp);
134 }
135
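/*
 * Issue a PLOGI asynchronously: allocate an SRB, arm its timer with a
 * small pad over the async timeout, and start the IOCB; if the IOCB
 * cannot be started, flag the port for relogin via the DPC thread.
 */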
136 int
137 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
138     uint16_t *data)
139 {
140         srb_t *sp;
141         struct srb_iocb *lio;
142         int rval;
143
144         rval = QLA_FUNCTION_FAILED;
145         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
146         if (!sp)
147                 goto done;
148
149         sp->type = SRB_LOGIN_CMD;
150         sp->name = "login";
151         qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
152
153         lio = &sp->u.iocb_cmd;
154         lio->timeout = qla2x00_async_iocb_timeout;
155         sp->done = qla2x00_async_login_sp_done;
156         lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
157         if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
158                 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
159         rval = qla2x00_start_sp(sp);
160         if (rval != QLA_SUCCESS) {
161                 fcport->flags &= ~FCF_ASYNC_SENT;
162                 fcport->flags |= FCF_LOGIN_NEEDED;
163                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
164                 goto done_free_sp;
165         }
166
167         ql_dbg(ql_dbg_disc, vha, 0x2072,
168             "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
169             "retries=%d.\n", sp->handle, fcport->loop_id,
170             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
171             fcport->login_retry);
172         return rval;
173
174 done_free_sp:
175         sp->free(fcport->vha, sp);
176 done:
177         return rval;
178 }
179
180 static void
181 qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
182 {
183         srb_t *sp = (srb_t *)ptr;
184         struct srb_iocb *lio = &sp->u.iocb_cmd;
185         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
186
187         if (!test_bit(UNLOADING, &vha->dpc_flags))
188                 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
189                     lio->u.logio.data);
190         sp->free(sp->fcport->vha, sp);
191 }
192
193 int
194 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
195 {
196         srb_t *sp;
197         struct srb_iocb *lio;
198         int rval;
199
200         rval = QLA_FUNCTION_FAILED;
201         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
202         if (!sp)
203                 goto done;
204
205         sp->type = SRB_LOGOUT_CMD;
206         sp->name = "logout";
207         qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
208
209         lio = &sp->u.iocb_cmd;
210         lio->timeout = qla2x00_async_iocb_timeout;
211         sp->done = qla2x00_async_logout_sp_done;
212         rval = qla2x00_start_sp(sp);
213         if (rval != QLA_SUCCESS)
214                 goto done_free_sp;
215
216         ql_dbg(ql_dbg_disc, vha, 0x2070,
217             "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
218             sp->handle, fcport->loop_id, fcport->d_id.b.domain,
219             fcport->d_id.b.area, fcport->d_id.b.al_pa);
220         return rval;
221
222 done_free_sp:
223         sp->free(fcport->vha, sp);
224 done:
225         return rval;
226 }
227
228 static void
229 qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
230 {
231         srb_t *sp = (srb_t *)ptr;
232         struct srb_iocb *lio = &sp->u.iocb_cmd;
233         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
234
235         if (!test_bit(UNLOADING, &vha->dpc_flags))
236                 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
237                     lio->u.logio.data);
238         sp->free(sp->fcport->vha, sp);
239 }
240
241 int
242 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
243     uint16_t *data)
244 {
245         srb_t *sp;
246         struct srb_iocb *lio;
247         int rval;
248
249         rval = QLA_FUNCTION_FAILED;
250         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
251         if (!sp)
252                 goto done;
253
254         sp->type = SRB_ADISC_CMD;
255         sp->name = "adisc";
256         qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
257
258         lio = &sp->u.iocb_cmd;
259         lio->timeout = qla2x00_async_iocb_timeout;
260         sp->done = qla2x00_async_adisc_sp_done;
261         if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
262                 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
263         rval = qla2x00_start_sp(sp);
264         if (rval != QLA_SUCCESS)
265                 goto done_free_sp;
266
267         ql_dbg(ql_dbg_disc, vha, 0x206f,
268             "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
269             sp->handle, fcport->loop_id, fcport->d_id.b.domain,
270             fcport->d_id.b.area, fcport->d_id.b.al_pa);
271         return rval;
272
273 done_free_sp:
274         sp->free(fcport->vha, sp);
275 done:
276         return rval;
277 }
278
279 static void
280 qla2x00_tmf_iocb_timeout(void *data)
281 {
282         srb_t *sp = (srb_t *)data;
283         struct srb_iocb *tmf = &sp->u.iocb_cmd;
284
285         tmf->u.tmf.comp_status = CS_TIMEOUT;
286         complete(&tmf->u.tmf.comp);
287 }
288
289 static void
290 qla2x00_tmf_sp_done(void *data, void *ptr, int res)
291 {
292         srb_t *sp = (srb_t *)ptr;
293         struct srb_iocb *tmf = &sp->u.iocb_cmd;
294         complete(&tmf->u.tmf.comp);
295 }
296
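/*
 * Send a task-management IOCB and block on its completion; unless the
 * driver is unloading (or this is an ISPFX00), follow up with a marker
 * IOCB for the LUN or the whole target, depending on the TM flags.
 */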
297 int
298 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
299         uint32_t tag)
300 {
301         struct scsi_qla_host *vha = fcport->vha;
302         struct srb_iocb *tm_iocb;
303         srb_t *sp;
304         int rval = QLA_FUNCTION_FAILED;
305
306         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
307         if (!sp)
308                 goto done;
309
310         tm_iocb = &sp->u.iocb_cmd;
311         sp->type = SRB_TM_CMD;
312         sp->name = "tmf";
313         qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
314         tm_iocb->u.tmf.flags = flags;
315         tm_iocb->u.tmf.lun = lun;
316         tm_iocb->u.tmf.data = tag;
317         sp->done = qla2x00_tmf_sp_done;
318         tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
319         init_completion(&tm_iocb->u.tmf.comp);
320
321         rval = qla2x00_start_sp(sp);
322         if (rval != QLA_SUCCESS)
323                 goto done_free_sp;
324
325         ql_dbg(ql_dbg_taskm, vha, 0x802f,
326             "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
327             sp->handle, fcport->loop_id, fcport->d_id.b.domain,
328             fcport->d_id.b.area, fcport->d_id.b.al_pa);
329
330         wait_for_completion(&tm_iocb->u.tmf.comp);
331
332         rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
333             QLA_SUCCESS : QLA_FUNCTION_FAILED;
334
335         if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
336                 ql_dbg(ql_dbg_taskm, vha, 0x8030,
337                     "TM IOCB failed (%x).\n", rval);
338         }
339
340         if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
341                 flags = tm_iocb->u.tmf.flags;
342                 lun = (uint16_t)tm_iocb->u.tmf.lun;
343
344                 /* Issue Marker IOCB */
345                 qla2x00_marker(vha, vha->hw->req_q_map[0],
346                     vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
347                     flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
348         }
349
350 done_free_sp:
351         sp->free(vha, sp);
352 done:
353         return rval;
354 }
355
356 static void
357 qla24xx_abort_iocb_timeout(void *data)
358 {
359         srb_t *sp = (srb_t *)data;
360         struct srb_iocb *abt = &sp->u.iocb_cmd;
361
362         abt->u.abt.comp_status = CS_TIMEOUT;
363         complete(&abt->u.abt.comp);
364 }
365
366 static void
367 qla24xx_abort_sp_done(void *data, void *ptr, int res)
368 {
369         srb_t *sp = (srb_t *)ptr;
370         struct srb_iocb *abt = &sp->u.iocb_cmd;
371
372         complete(&abt->u.abt.comp);
373 }
374
375 static int
376 qla24xx_async_abort_cmd(srb_t *cmd_sp)
377 {
378         scsi_qla_host_t *vha = cmd_sp->fcport->vha;
379         fc_port_t *fcport = cmd_sp->fcport;
380         struct srb_iocb *abt_iocb;
381         srb_t *sp;
382         int rval = QLA_FUNCTION_FAILED;
383
384         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
385         if (!sp)
386                 goto done;
387
388         abt_iocb = &sp->u.iocb_cmd;
389         sp->type = SRB_ABT_CMD;
390         sp->name = "abort";
391         qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
392         abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
393         sp->done = qla24xx_abort_sp_done;
394         abt_iocb->timeout = qla24xx_abort_iocb_timeout;
395         init_completion(&abt_iocb->u.abt.comp);
396
397         rval = qla2x00_start_sp(sp);
398         if (rval != QLA_SUCCESS)
399                 goto done_free_sp;
400
401         ql_dbg(ql_dbg_async, vha, 0x507c,
402             "Abort command issued - hdl=%x, target_id=%x\n",
403             cmd_sp->handle, fcport->tgt_id);
404
405         wait_for_completion(&abt_iocb->u.abt.comp);
406
407         rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
408             QLA_SUCCESS : QLA_FUNCTION_FAILED;
409
410 done_free_sp:
411         sp->free(vha, sp);
412 done:
413         return rval;
414 }
415
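/*
 * Validate the command by locating its handle in the outstanding-command
 * array under the hardware lock; FX00 pass-through commands are aborted
 * via qlafx00_fx_disc(), everything else via an asynchronous abort IOCB.
 */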
416 int
417 qla24xx_async_abort_command(srb_t *sp)
418 {
419         unsigned long   flags = 0;
420
421         uint32_t        handle;
422         fc_port_t       *fcport = sp->fcport;
423         struct scsi_qla_host *vha = fcport->vha;
424         struct qla_hw_data *ha = vha->hw;
425         struct req_que *req = vha->req;
426
427         spin_lock_irqsave(&ha->hardware_lock, flags);
428         for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
429                 if (req->outstanding_cmds[handle] == sp)
430                         break;
431         }
432         spin_unlock_irqrestore(&ha->hardware_lock, flags);
433         if (handle == req->num_outstanding_cmds) {
434                 /* Command not found. */
435                 return QLA_FUNCTION_FAILED;
436         }
437         if (sp->type == SRB_FXIOCB_DCMD)
438                 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
439                     FXDISC_ABORT_IOCTL);
440
441         return qla24xx_async_abort_cmd(sp);
442 }
443
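/*
 * Completion of an asynchronous login, keyed on the mailbox status:
 * COMMAND_COMPLETE validates the login against the port database
 * (scheduling a relogin or a LOGO/PLOGI retry on failure, and an ADISC
 * for FCP-2 devices) before updating the fcport; COMMAND_ERROR schedules
 * a relogin or marks the device lost; PORT_ID_USED adopts the loop ID
 * from the mailbox and retries with a LOGO then PLOGI; LOOP_ID_USED
 * picks a new loop ID before retrying the login.
 */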
444 void
445 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
446     uint16_t *data)
447 {
448         int rval;
449
450         switch (data[0]) {
451         case MBS_COMMAND_COMPLETE:
452                 /*
453                  * Driver must validate login state - If PRLI not complete,
454                  * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
455                  * requests.
456                  */
457                 rval = qla2x00_get_port_database(vha, fcport, 0);
458                 if (rval == QLA_NOT_LOGGED_IN) {
459                         fcport->flags &= ~FCF_ASYNC_SENT;
460                         fcport->flags |= FCF_LOGIN_NEEDED;
461                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
462                         break;
463                 }
464
465                 if (rval != QLA_SUCCESS) {
466                         qla2x00_post_async_logout_work(vha, fcport, NULL);
467                         qla2x00_post_async_login_work(vha, fcport, NULL);
468                         break;
469                 }
470                 if (fcport->flags & FCF_FCP2_DEVICE) {
471                         qla2x00_post_async_adisc_work(vha, fcport, data);
472                         break;
473                 }
474                 qla2x00_update_fcport(vha, fcport);
475                 break;
476         case MBS_COMMAND_ERROR:
477                 fcport->flags &= ~FCF_ASYNC_SENT;
478                 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
479                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
480                 else
481                         qla2x00_mark_device_lost(vha, fcport, 1, 0);
482                 break;
483         case MBS_PORT_ID_USED:
484                 fcport->loop_id = data[1];
485                 qla2x00_post_async_logout_work(vha, fcport, NULL);
486                 qla2x00_post_async_login_work(vha, fcport, NULL);
487                 break;
488         case MBS_LOOP_ID_USED:
489                 fcport->loop_id++;
490                 rval = qla2x00_find_new_loop_id(vha, fcport);
491                 if (rval != QLA_SUCCESS) {
492                         fcport->flags &= ~FCF_ASYNC_SENT;
493                         qla2x00_mark_device_lost(vha, fcport, 1, 0);
494                         break;
495                 }
496                 qla2x00_post_async_login_work(vha, fcport, NULL);
497                 break;
498         }
499         return;
500 }
501
502 void
503 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
504     uint16_t *data)
505 {
506         /* Don't re-login in target mode */
507         if (!fcport->tgt_session)
508                 qla2x00_mark_device_lost(vha, fcport, 1, 0);
509         qlt_logo_completion_handler(fcport, data[0]);
510         return;
511 }
512
513 void
514 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
515     uint16_t *data)
516 {
517         if (data[0] == MBS_COMMAND_COMPLETE) {
518                 qla2x00_update_fcport(vha, fcport);
519
520                 return;
521         }
522
523         /* Retry login. */
524         fcport->flags &= ~FCF_ASYNC_SENT;
525         if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
526                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
527         else
528                 qla2x00_mark_device_lost(vha, fcport, 1, 0);
529
530         return;
531 }
532
533 /****************************************************************************/
534 /*                QLogic ISP2x00 Hardware Support Functions.                */
535 /****************************************************************************/
536
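/*
 * ISP8031 IDC participation at driver load: with the IDC lock held,
 * advertise driver presence, decide reset ownership, publish or verify
 * the IDC major version, record this function's supported minor version,
 * and run the IDC state handler.  The reset owner also moves the device
 * state to READY once the port configuration has been read.
 */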
537 static int
538 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
539 {
540         int rval = QLA_SUCCESS;
541         struct qla_hw_data *ha = vha->hw;
542         uint32_t idc_major_ver, idc_minor_ver;
543         uint16_t config[4];
544
545         qla83xx_idc_lock(vha, 0);
546
547         /* SV: TODO: Assign initialization timeout from
548          * flash-info / other param
549          */
550         ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
551         ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
552
553         /* Set our fcoe function presence */
554         if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
555                 ql_dbg(ql_dbg_p3p, vha, 0xb077,
556                     "Error while setting DRV-Presence.\n");
557                 rval = QLA_FUNCTION_FAILED;
558                 goto exit;
559         }
560
561         /* Decide the reset ownership */
562         qla83xx_reset_ownership(vha);
563
564         /*
565          * On first protocol driver load:
566          * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
567          * register.
568          * Others: Check compatibility with current IDC Major version.
569          */
570         qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
571         if (ha->flags.nic_core_reset_owner) {
572                 /* Set IDC Major version */
573                 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
574                 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
575
576                 /* Clearing IDC-Lock-Recovery register */
577                 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
578         } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
579                 /*
580                  * Clear further IDC participation if we are not compatible with
581                  * the current IDC Major Version.
582                  */
583                 ql_log(ql_log_warn, vha, 0xb07d,
584                     "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
585                     idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
586                 __qla83xx_clear_drv_presence(vha);
587                 rval = QLA_FUNCTION_FAILED;
588                 goto exit;
589         }
590         /* Each function sets its supported Minor version. */
591         qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
592         idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
593         qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
594
595         if (ha->flags.nic_core_reset_owner) {
596                 memset(config, 0, sizeof(config));
597                 if (!qla81xx_get_port_config(vha, config))
598                         qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
599                             QLA8XXX_DEV_READY);
600         }
601
602         rval = qla83xx_idc_state_handler(vha);
603
604 exit:
605         qla83xx_idc_unlock(vha, 0);
606
607         return rval;
608 }
609
610 /*
611 * qla2x00_initialize_adapter
612 *      Initialize board.
613 *
614 * Input:
615 *      vha = adapter block pointer.
616 *
617 * Returns:
618 *      0 = success
619 */
620 int
621 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
622 {
623         int     rval;
624         struct qla_hw_data *ha = vha->hw;
625         struct req_que *req = ha->req_q_map[0];
626
627         /* Clear adapter flags. */
628         vha->flags.online = 0;
629         ha->flags.chip_reset_done = 0;
630         vha->flags.reset_active = 0;
631         ha->flags.pci_channel_io_perm_failure = 0;
632         ha->flags.eeh_busy = 0;
633         vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
634         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
635         atomic_set(&vha->loop_state, LOOP_DOWN);
636         vha->device_flags = DFLG_NO_CABLE;
637         vha->dpc_flags = 0;
638         vha->flags.management_server_logged_in = 0;
639         vha->marker_needed = 0;
640         ha->isp_abort_cnt = 0;
641         ha->beacon_blink_led = 0;
642
643         set_bit(0, ha->req_qid_map);
644         set_bit(0, ha->rsp_qid_map);
645
646         ql_dbg(ql_dbg_init, vha, 0x0040,
647             "Configuring PCI space...\n");
648         rval = ha->isp_ops->pci_config(vha);
649         if (rval) {
650                 ql_log(ql_log_warn, vha, 0x0044,
651                     "Unable to configure PCI space.\n");
652                 return (rval);
653         }
654
655         ha->isp_ops->reset_chip(vha);
656
657         rval = qla2xxx_get_flash_info(vha);
658         if (rval) {
659                 ql_log(ql_log_fatal, vha, 0x004f,
660                     "Unable to validate FLASH data.\n");
661                 return rval;
662         }
663
664         if (IS_QLA8044(ha)) {
665                 qla8044_read_reset_template(vha);
666
667                 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
668                  * If DONTRESET_BIT0 is set, drivers should not set dev_state
669                  * to NEED_RESET. But if NEED_RESET is set, drivers should
670                  * honor the reset. */
671                 if (ql2xdontresethba == 1)
672                         qla8044_set_idc_dontreset(vha);
673         }
674
675         ha->isp_ops->get_flash_version(vha, req->ring);
676         ql_dbg(ql_dbg_init, vha, 0x0061,
677             "Configure NVRAM parameters...\n");
678
679         ha->isp_ops->nvram_config(vha);
680
681         if (ha->flags.disable_serdes) {
682                 /* Mask HBA via NVRAM settings? */
683                 ql_log(ql_log_info, vha, 0x0077,
684                     "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
685                 return QLA_FUNCTION_FAILED;
686         }
687
688         ql_dbg(ql_dbg_init, vha, 0x0078,
689             "Verifying loaded RISC code...\n");
690
691         if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
692                 rval = ha->isp_ops->chip_diag(vha);
693                 if (rval)
694                         return (rval);
695                 rval = qla2x00_setup_chip(vha);
696                 if (rval)
697                         return (rval);
698         }
699
700         if (IS_QLA84XX(ha)) {
701                 ha->cs84xx = qla84xx_get_chip(vha);
702                 if (!ha->cs84xx) {
703                         ql_log(ql_log_warn, vha, 0x00d0,
704                             "Unable to configure ISP84XX.\n");
705                         return QLA_FUNCTION_FAILED;
706                 }
707         }
708
709         if (qla_ini_mode_enabled(vha))
710                 rval = qla2x00_init_rings(vha);
711
712         ha->flags.chip_reset_done = 1;
713
714         if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
715                 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
716                 rval = qla84xx_init_chip(vha);
717                 if (rval != QLA_SUCCESS) {
718                         ql_log(ql_log_warn, vha, 0x00d4,
719                             "Unable to initialize ISP84XX.\n");
720                         qla84xx_put_chip(vha);
721                 }
722         }
723
724         /* Load the NIC Core f/w if we are the first protocol driver. */
725         if (IS_QLA8031(ha)) {
726                 rval = qla83xx_nic_core_fw_load(vha);
727                 if (rval)
728                         ql_log(ql_log_warn, vha, 0x0124,
729                             "Error in initializing NIC Core f/w.\n");
730         }
731
732         if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
733                 qla24xx_read_fcp_prio_cfg(vha);
734
735         if (IS_P3P_TYPE(ha))
736                 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
737         else
738                 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
739
740         return (rval);
741 }
742
743 /**
744  * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
745  * @ha: HA context
746  *
747  * Returns 0 on success.
748  */
749 int
750 qla2100_pci_config(scsi_qla_host_t *vha)
751 {
752         uint16_t w;
753         unsigned long flags;
754         struct qla_hw_data *ha = vha->hw;
755         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
756
757         pci_set_master(ha->pdev);
758         pci_try_set_mwi(ha->pdev);
759
760         pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
761         w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
762         pci_write_config_word(ha->pdev, PCI_COMMAND, w);
763
764         pci_disable_rom(ha->pdev);
765
766         /* Get PCI bus information. */
767         spin_lock_irqsave(&ha->hardware_lock, flags);
768         ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
769         spin_unlock_irqrestore(&ha->hardware_lock, flags);
770
771         return QLA_SUCCESS;
772 }
773
774 /**
775  * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
776  * @ha: HA context
777  *
778  * Returns 0 on success.
779  */
780 int
781 qla2300_pci_config(scsi_qla_host_t *vha)
782 {
783         uint16_t        w;
784         unsigned long   flags = 0;
785         uint32_t        cnt;
786         struct qla_hw_data *ha = vha->hw;
787         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
788
789         pci_set_master(ha->pdev);
790         pci_try_set_mwi(ha->pdev);
791
792         pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
793         w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
794
795         if (IS_QLA2322(ha) || IS_QLA6322(ha))
796                 w &= ~PCI_COMMAND_INTX_DISABLE;
797         pci_write_config_word(ha->pdev, PCI_COMMAND, w);
798
799         /*
800          * If this is a 2300 card and not 2312, reset the
801          * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
802          * the 2310 also reports itself as a 2300 so we need to get the
803          * fb revision level -- a 6 indicates it really is a 2300 and
804          * not a 2310.
805          */
806         if (IS_QLA2300(ha)) {
807                 spin_lock_irqsave(&ha->hardware_lock, flags);
808
809                 /* Pause RISC. */
810                 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
811                 for (cnt = 0; cnt < 30000; cnt++) {
812                         if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
813                                 break;
814
815                         udelay(10);
816                 }
817
818                 /* Select FPM registers. */
819                 WRT_REG_WORD(&reg->ctrl_status, 0x20);
820                 RD_REG_WORD(&reg->ctrl_status);
821
822                 /* Get the fb rev level */
823                 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
824
825                 if (ha->fb_rev == FPM_2300)
826                         pci_clear_mwi(ha->pdev);
827
828                 /* Deselect FPM registers. */
829                 WRT_REG_WORD(&reg->ctrl_status, 0x0);
830                 RD_REG_WORD(&reg->ctrl_status);
831
832                 /* Release RISC module. */
833                 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
834                 for (cnt = 0; cnt < 30000; cnt++) {
835                         if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
836                                 break;
837
838                         udelay(10);
839                 }
840
841                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
842         }
843
844         pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
845
846         pci_disable_rom(ha->pdev);
847
848         /* Get PCI bus information. */
849         spin_lock_irqsave(&ha->hardware_lock, flags);
850         ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
851         spin_unlock_irqrestore(&ha->hardware_lock, flags);
852
853         return QLA_SUCCESS;
854 }
855
856 /**
857  * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
858  * @ha: HA context
859  *
860  * Returns 0 on success.
861  */
862 int
863 qla24xx_pci_config(scsi_qla_host_t *vha)
864 {
865         uint16_t w;
866         unsigned long flags = 0;
867         struct qla_hw_data *ha = vha->hw;
868         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
869
870         pci_set_master(ha->pdev);
871         pci_try_set_mwi(ha->pdev);
872
873         pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
874         w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
875         w &= ~PCI_COMMAND_INTX_DISABLE;
876         pci_write_config_word(ha->pdev, PCI_COMMAND, w);
877
878         pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
879
880         /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
881         if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
882                 pcix_set_mmrbc(ha->pdev, 2048);
883
884         /* PCIe -- adjust Maximum Read Request Size (4096). */
885         if (pci_is_pcie(ha->pdev))
886                 pcie_set_readrq(ha->pdev, 4096);
887
888         pci_disable_rom(ha->pdev);
889
890         ha->chip_revision = ha->pdev->revision;
891
892         /* Get PCI bus information. */
893         spin_lock_irqsave(&ha->hardware_lock, flags);
894         ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
895         spin_unlock_irqrestore(&ha->hardware_lock, flags);
896
897         return QLA_SUCCESS;
898 }
899
900 /**
901  * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
902  * @ha: HA context
903  *
904  * Returns 0 on success.
905  */
906 int
907 qla25xx_pci_config(scsi_qla_host_t *vha)
908 {
909         uint16_t w;
910         struct qla_hw_data *ha = vha->hw;
911
912         pci_set_master(ha->pdev);
913         pci_try_set_mwi(ha->pdev);
914
915         pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
916         w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
917         w &= ~PCI_COMMAND_INTX_DISABLE;
918         pci_write_config_word(ha->pdev, PCI_COMMAND, w);
919
920         /* PCIe -- adjust Maximum Read Request Size (4096). */
921         if (pci_is_pcie(ha->pdev))
922                 pcie_set_readrq(ha->pdev, 4096);
923
924         pci_disable_rom(ha->pdev);
925
926         ha->chip_revision = ha->pdev->revision;
927
928         return QLA_SUCCESS;
929 }
930
931 /**
932  * qla2x00_isp_firmware() - Choose firmware image.
933  * @ha: HA context
934  *
935  * Returns 0 on success.
936  */
937 static int
938 qla2x00_isp_firmware(scsi_qla_host_t *vha)
939 {
940         int  rval;
941         uint16_t loop_id, topo, sw_cap;
942         uint8_t domain, area, al_pa;
943         struct qla_hw_data *ha = vha->hw;
944
945         /* Assume loading risc code */
946         rval = QLA_FUNCTION_FAILED;
947
948         if (ha->flags.disable_risc_code_load) {
949                 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
950
951                 /* Verify checksum of loaded RISC code. */
952                 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
953                 if (rval == QLA_SUCCESS) {
954                         /* And, verify we are not in ROM code. */
955                         rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
956                             &area, &domain, &topo, &sw_cap);
957                 }
958         }
959
960         if (rval)
961                 ql_dbg(ql_dbg_init, vha, 0x007a,
962                     "**** Load RISC code ****.\n");
963
964         return (rval);
965 }
966
967 /**
968  * qla2x00_reset_chip() - Reset ISP chip.
969  * @ha: HA context
970  *
971  * Returns 0 on success.
972  */
973 void
974 qla2x00_reset_chip(scsi_qla_host_t *vha)
975 {
976         unsigned long   flags = 0;
977         struct qla_hw_data *ha = vha->hw;
978         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
979         uint32_t        cnt;
980         uint16_t        cmd;
981
982         if (unlikely(pci_channel_offline(ha->pdev)))
983                 return;
984
985         ha->isp_ops->disable_intrs(ha);
986
987         spin_lock_irqsave(&ha->hardware_lock, flags);
988
989         /* Turn off master enable */
990         cmd = 0;
991         pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
992         cmd &= ~PCI_COMMAND_MASTER;
993         pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
994
995         if (!IS_QLA2100(ha)) {
996                 /* Pause RISC. */
997                 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
998                 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
999                         for (cnt = 0; cnt < 30000; cnt++) {
1000                                 if ((RD_REG_WORD(&reg->hccr) &
1001                                     HCCR_RISC_PAUSE) != 0)
1002                                         break;
1003                                 udelay(100);
1004                         }
1005                 } else {
1006                         RD_REG_WORD(&reg->hccr);        /* PCI Posting. */
1007                         udelay(10);
1008                 }
1009
1010                 /* Select FPM registers. */
1011                 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1012                 RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1013
1014                 /* FPM Soft Reset. */
1015                 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
1016                 RD_REG_WORD(&reg->fpm_diag_config);     /* PCI Posting. */
1017
1018                 /* Toggle Fpm Reset. */
1019                 if (!IS_QLA2200(ha)) {
1020                         WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
1021                         RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1022                 }
1023
1024                 /* Select frame buffer registers. */
1025                 WRT_REG_WORD(&reg->ctrl_status, 0x10);
1026                 RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1027
1028                 /* Reset frame buffer FIFOs. */
1029                 if (IS_QLA2200(ha)) {
1030                         WRT_FB_CMD_REG(ha, reg, 0xa000);
1031                         RD_FB_CMD_REG(ha, reg);         /* PCI Posting. */
1032                 } else {
1033                         WRT_FB_CMD_REG(ha, reg, 0x00fc);
1034
1035                         /* Read back fb_cmd until zero or ~300 ms max (3000 x 100 usec) */
1036                         for (cnt = 0; cnt < 3000; cnt++) {
1037                                 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
1038                                         break;
1039                                 udelay(100);
1040                         }
1041                 }
1042
1043                 /* Select RISC module registers. */
1044                 WRT_REG_WORD(&reg->ctrl_status, 0);
1045                 RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1046
1047                 /* Reset RISC processor. */
1048                 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1049                 RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1050
1051                 /* Release RISC processor. */
1052                 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1053                 RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1054         }
1055
1056         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1057         WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
1058
1059         /* Reset ISP chip. */
1060         WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1061
1062         /* Wait for RISC to recover from reset. */
1063         if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1064                 /*
1065                  * It is necessary to have a delay here since the card doesn't
1066                  * respond to PCI reads during a reset. On some architectures
1067                  * this will result in an MCA.
1068                  */
1069                 udelay(20);
1070                 for (cnt = 30000; cnt; cnt--) {
1071                         if ((RD_REG_WORD(&reg->ctrl_status) &
1072                             CSR_ISP_SOFT_RESET) == 0)
1073                                 break;
1074                         udelay(100);
1075                 }
1076         } else
1077                 udelay(10);
1078
1079         /* Reset RISC processor. */
1080         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1081
1082         WRT_REG_WORD(&reg->semaphore, 0);
1083
1084         /* Release RISC processor. */
1085         WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1086         RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
1087
1088         if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1089                 for (cnt = 0; cnt < 30000; cnt++) {
1090                         if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1091                                 break;
1092
1093                         udelay(100);
1094                 }
1095         } else
1096                 udelay(100);
1097
1098         /* Turn on master enable */
1099         cmd |= PCI_COMMAND_MASTER;
1100         pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1101
1102         /* Disable RISC pause on FPM parity error. */
1103         if (!IS_QLA2100(ha)) {
1104                 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
1105                 RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1106         }
1107
1108         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1109 }
1110
1111 /**
1112  * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
1113  *
1114  * Returns 0 on success.
1115  */
1116 static int
1117 qla81xx_reset_mpi(scsi_qla_host_t *vha)
1118 {
1119         uint16_t mb[4] = {0x1010, 0, 1, 0};
1120
1121         if (!IS_QLA81XX(vha->hw))
1122                 return QLA_SUCCESS;
1123
1124         return qla81xx_write_mpi_register(vha, mb);
1125 }
1126
1127 /**
1128  * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
1129  * @ha: HA context
1130  *
1131  * Returns 0 on success.
1132  */
1133 static inline int
1134 qla24xx_reset_risc(scsi_qla_host_t *vha)
1135 {
1136         unsigned long flags = 0;
1137         struct qla_hw_data *ha = vha->hw;
1138         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1139         uint32_t cnt;
1140         uint16_t wd;
1141         static int abts_cnt; /* ISP abort retry counts */
1142         int rval = QLA_SUCCESS;
1143
1144         spin_lock_irqsave(&ha->hardware_lock, flags);
1145
1146         /* Reset RISC. */
1147         WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1148         for (cnt = 0; cnt < 30000; cnt++) {
1149                 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
1150                         break;
1151
1152                 udelay(10);
1153         }
1154
1155         if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
1156                 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
1157
1158         ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
1159             "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
1160             RD_REG_DWORD(&reg->hccr),
1161             RD_REG_DWORD(&reg->ctrl_status),
1162             (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
1163
1164         WRT_REG_DWORD(&reg->ctrl_status,
1165             CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1166         pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1167
1168         udelay(100);
1169
1170         /* Wait for firmware to complete NVRAM accesses. */
1171         RD_REG_WORD(&reg->mailbox0);
1172         for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
1173             rval == QLA_SUCCESS; cnt--) {
1174                 barrier();
1175                 if (cnt)
1176                         udelay(5);
1177                 else
1178                         rval = QLA_FUNCTION_TIMEOUT;
1179         }
1180
1181         if (rval == QLA_SUCCESS)
1182                 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
1183
1184         ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
1185             "HCCR: 0x%x, MailBox0 Status 0x%x\n",
1186             RD_REG_DWORD(&reg->hccr),
1187             RD_REG_DWORD(&reg->mailbox0));
1188
1189         /* Wait for soft-reset to complete. */
1190         RD_REG_DWORD(&reg->ctrl_status);
1191         for (cnt = 0; cnt < 6000000; cnt++) {
1192                 barrier();
1193                 if ((RD_REG_DWORD(&reg->ctrl_status) &
1194                     CSRX_ISP_SOFT_RESET) == 0)
1195                         break;
1196
1197                 udelay(5);
1198         }
1199         if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
1200                 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
1201
1202         ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
1203             "HCCR: 0x%x, Soft Reset status: 0x%x\n",
1204             RD_REG_DWORD(&reg->hccr),
1205             RD_REG_DWORD(&reg->ctrl_status));
1206
1207         /* If required, do an MPI FW reset now */
1208         if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1209                 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1210                         if (++abts_cnt < 5) {
1211                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1212                                 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1213                         } else {
1214                                 /*
1215                                  * We exhausted the ISP abort retries. We have to
1216                                  * set the board offline.
1217                                  */
1218                                 abts_cnt = 0;
1219                                 vha->flags.online = 0;
1220                         }
1221                 }
1222         }
1223
1224         WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1225         RD_REG_DWORD(&reg->hccr);
1226
1227         WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
1228         RD_REG_DWORD(&reg->hccr);
1229
1230         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1231         RD_REG_DWORD(&reg->hccr);
1232
1233         RD_REG_WORD(&reg->mailbox0);
1234         for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
1235             rval == QLA_SUCCESS; cnt--) {
1236                 barrier();
1237                 if (cnt)
1238                         udelay(5);
1239                 else
1240                         rval = QLA_FUNCTION_TIMEOUT;
1241         }
1242         if (rval == QLA_SUCCESS)
1243                 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
1244
1245         ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
1246             "Host Risc 0x%x, mailbox0 0x%x\n",
1247             RD_REG_DWORD(&reg->hccr),
1248              RD_REG_WORD(&reg->mailbox0));
1249
1250         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1251
1252         ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
1253             "Driver in %s mode\n",
1254             IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
1255
1256         if (IS_NOPOLLING_TYPE(ha))
1257                 ha->isp_ops->enable_intrs(ha);
1258
1259         return rval;
1260 }
1261
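/*
 * The RISC semaphore is reached through the indirect iobase_addr /
 * iobase_window register pair: select the RISC register bank, then read
 * or write the windowed location.
 */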
1262 static void
1263 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
1264 {
1265         struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1266
1267         WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1268         *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
1269
1270 }
1271
1272 static void
1273 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
1274 {
1275         struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1276
1277         WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1278         WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
1279 }
1280
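/*
 * Applies only to adapters with subsystem IDs 0x0175 and 0x0240: pause
 * the RISC, then repeatedly try to take the RISC semaphore, clearing a
 * stale force bit when one is seen; if the total wait exceeds
 * TIMEOUT_TOTAL_ELAPSED, take the semaphore by force.
 */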
1281 static void
1282 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
1283 {
1284         uint32_t wd32 = 0;
1285         uint delta_msec = 100;
1286         uint elapsed_msec = 0;
1287         uint timeout_msec;
1288         ulong n;
1289
1290         if (vha->hw->pdev->subsystem_device != 0x0175 &&
1291             vha->hw->pdev->subsystem_device != 0x0240)
1292                 return;
1293
1294         WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
1295         udelay(100);
1296
1297 attempt:
1298         timeout_msec = TIMEOUT_SEMAPHORE;
1299         n = timeout_msec / delta_msec;
1300         while (n--) {
1301                 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
1302                 qla25xx_read_risc_sema_reg(vha, &wd32);
1303                 if (wd32 & RISC_SEMAPHORE)
1304                         break;
1305                 msleep(delta_msec);
1306                 elapsed_msec += delta_msec;
1307                 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1308                         goto force;
1309         }
1310
1311         if (!(wd32 & RISC_SEMAPHORE))
1312                 goto force;
1313
1314         if (!(wd32 & RISC_SEMAPHORE_FORCE))
1315                 goto acquired;
1316
1317         qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
1318         timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
1319         n = timeout_msec / delta_msec;
1320         while (n--) {
1321                 qla25xx_read_risc_sema_reg(vha, &wd32);
1322                 if (!(wd32 & RISC_SEMAPHORE_FORCE))
1323                         break;
1324                 msleep(delta_msec);
1325                 elapsed_msec += delta_msec;
1326                 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1327                         goto force;
1328         }
1329
1330         if (wd32 & RISC_SEMAPHORE_FORCE)
1331                 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
1332
1333         goto attempt;
1334
1335 force:
1336         qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
1337
1338 acquired:
1339         return;
1340 }
1341
1342 /**
1343  * qla24xx_reset_chip() - Reset ISP24xx chip.
1344  * @ha: HA context
1345  *
1346  * Returns 0 on success.
1347  */
1348 void
1349 qla24xx_reset_chip(scsi_qla_host_t *vha)
1350 {
1351         struct qla_hw_data *ha = vha->hw;
1352
1353         if (pci_channel_offline(ha->pdev) &&
1354             ha->flags.pci_channel_io_perm_failure) {
1355                 return;
1356         }
1357
1358         ha->isp_ops->disable_intrs(ha);
1359
1360         qla25xx_manipulate_risc_semaphore(vha);
1361
1362         /* Perform RISC reset. */
1363         qla24xx_reset_risc(vha);
1364 }
1365
1366 /**
1367  * qla2x00_chip_diag() - Test chip for proper operation.
1368  * @ha: HA context
1369  *
1370  * Returns 0 on success.
1371  */
1372 int
1373 qla2x00_chip_diag(scsi_qla_host_t *vha)
1374 {
1375         int             rval;
1376         struct qla_hw_data *ha = vha->hw;
1377         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1378         unsigned long   flags = 0;
1379         uint16_t        data;
1380         uint32_t        cnt;
1381         uint16_t        mb[5];
1382         struct req_que *req = ha->req_q_map[0];
1383
1384         /* Assume a failed state */
1385         rval = QLA_FUNCTION_FAILED;
1386
1387         ql_dbg(ql_dbg_init, vha, 0x007b,
1388             "Testing device at %lx.\n", (u_long)&reg->flash_address);
1389
1390         spin_lock_irqsave(&ha->hardware_lock, flags);
1391
1392         /* Reset ISP chip. */
1393         WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1394
1395         /*
1396          * We need to have a delay here since the card will not respond while
1397          * in reset causing an MCA on some architectures.
1398          */
1399         udelay(20);
1400         data = qla2x00_debounce_register(&reg->ctrl_status);
1401         for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1402                 udelay(5);
1403                 data = RD_REG_WORD(&reg->ctrl_status);
1404                 barrier();
1405         }
1406
1407         if (!cnt)
1408                 goto chip_diag_failed;
1409
1410         ql_dbg(ql_dbg_init, vha, 0x007c,
1411             "Reset register cleared by chip reset.\n");
1412
1413         /* Reset RISC processor. */
1414         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1415         WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1416
1417         /* Workaround for QLA2312 PCI parity error */
1418         if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1419                 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1420                 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1421                         udelay(5);
1422                         data = RD_MAILBOX_REG(ha, reg, 0);
1423                         barrier();
1424                 }
1425         } else
1426                 udelay(10);
1427
1428         if (!cnt)
1429                 goto chip_diag_failed;
1430
1431         /* Check product ID of chip */
1432         ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1433
1434         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1435         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1436         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1437         mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1438         if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1439             mb[3] != PROD_ID_3) {
1440                 ql_log(ql_log_warn, vha, 0x0062,
1441                     "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1442                     mb[1], mb[2], mb[3]);
1443
1444                 goto chip_diag_failed;
1445         }
1446         ha->product_id[0] = mb[1];
1447         ha->product_id[1] = mb[2];
1448         ha->product_id[2] = mb[3];
1449         ha->product_id[3] = mb[4];
1450
1451         /* Adjust fw RISC transfer size */
1452         if (req->length > 1024)
1453                 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1454         else
1455                 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1456                     req->length;
1457
1458         if (IS_QLA2200(ha) &&
1459             RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1460                 /* Limit firmware transfer size with a 2200A */
1461                 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1462
1463                 ha->device_type |= DT_ISP2200A;
1464                 ha->fw_transfer_size = 128;
1465         }
1466
1467         /* Wrap Incoming Mailboxes Test. */
1468         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1469
1470         ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1471         rval = qla2x00_mbx_reg_test(vha);
1472         if (rval)
1473                 ql_log(ql_log_warn, vha, 0x0080,
1474                     "Failed mailbox send register test.\n");
1475         else
1476                 /* Flag a successful rval */
1477                 rval = QLA_SUCCESS;
1478         spin_lock_irqsave(&ha->hardware_lock, flags);
1479
1480 chip_diag_failed:
1481         if (rval)
1482                 ql_log(ql_log_info, vha, 0x0081,
1483                     "Chip diagnostics **** FAILED ****.\n");
1484
1485         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1486
1487         return (rval);
1488 }
1489
1490 /**
1491  * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1492  * @ha: HA context
1493  *
1494  * Returns 0 on success.
1495  */
1496 int
1497 qla24xx_chip_diag(scsi_qla_host_t *vha)
1498 {
1499         int rval;
1500         struct qla_hw_data *ha = vha->hw;
1501         struct req_que *req = ha->req_q_map[0];
1502
1503         if (IS_P3P_TYPE(ha))
1504                 return QLA_SUCCESS;
1505
1506         ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1507
1508         rval = qla2x00_mbx_reg_test(vha);
1509         if (rval) {
1510                 ql_log(ql_log_warn, vha, 0x0082,
1511                     "Failed mailbox send register test.\n");
1512         } else {
1513                 /* Flag a successful rval */
1514                 rval = QLA_SUCCESS;
1515         }
1516
1517         return rval;
1518 }
1519
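/*
 * Size the firmware dump for the ISP family (fixed register area,
 * external memory, queue shadows, trace buffers) and allocate the DMA
 * buffers for the FCE and EFT traces captured in the dump; return early
 * if a dump buffer has already been allocated.
 */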
1520 void
1521 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1522 {
1523         int rval;
1524         uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1525             eft_size, fce_size, mq_size;
1526         dma_addr_t tc_dma;
1527         void *tc;
1528         struct qla_hw_data *ha = vha->hw;
1529         struct req_que *req = ha->req_q_map[0];
1530         struct rsp_que *rsp = ha->rsp_q_map[0];
1531
1532         if (ha->fw_dump) {
1533                 ql_dbg(ql_dbg_init, vha, 0x00bd,
1534                     "Firmware dump already allocated.\n");
1535                 return;
1536         }
1537
1538         ha->fw_dumped = 0;
1539         ha->fw_dump_cap_flags = 0;
1540         dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1541         req_q_size = rsp_q_size = 0;
1542
1543         if (IS_QLA27XX(ha))
1544                 goto try_fce;
1545
1546         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1547                 fixed_size = sizeof(struct qla2100_fw_dump);
1548         } else if (IS_QLA23XX(ha)) {
1549                 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1550                 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1551                     sizeof(uint16_t);
1552         } else if (IS_FWI2_CAPABLE(ha)) {
1553                 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1554                         fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1555                 else if (IS_QLA81XX(ha))
1556                         fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1557                 else if (IS_QLA25XX(ha))
1558                         fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1559                 else
1560                         fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1561
1562                 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1563                     sizeof(uint32_t);
1564                 if (ha->mqenable) {
1565                         if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1566                                 mq_size = sizeof(struct qla2xxx_mq_chain);
1567                         /*
1568                          * Allocate maximum buffer size for all queues.
1569                          * Resizing must be done at end-of-dump processing.
1570                          */
1571                         mq_size += ha->max_req_queues *
1572                             (req->length * sizeof(request_t));
1573                         mq_size += ha->max_rsp_queues *
1574                             (rsp->length * sizeof(response_t));
1575                 }
1576                 if (ha->tgt.atio_ring)
1577                         mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1578                 /* FCE is only supported on ISP25xx/81xx/83xx/27xx parts. */
1579                 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1580                     !IS_QLA27XX(ha))
1581                         goto try_eft;
1582
1583 try_fce:
1584                 if (ha->fce)
1585                         dma_free_coherent(&ha->pdev->dev,
1586                             FCE_SIZE, ha->fce, ha->fce_dma);
1587
1588                 /* Allocate memory for Fibre Channel Event Buffer. */
1589                 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1590                                          GFP_KERNEL);
1591                 if (!tc) {
1592                         ql_log(ql_log_warn, vha, 0x00be,
1593                             "Unable to allocate (%d KB) for FCE.\n",
1594                             FCE_SIZE / 1024);
1595                         goto try_eft;
1596                 }
1597
1598                 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1599                     ha->fce_mb, &ha->fce_bufs);
1600                 if (rval) {
1601                         ql_log(ql_log_warn, vha, 0x00bf,
1602                             "Unable to initialize FCE (%d).\n", rval);
1603                         dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1604                             tc_dma);
1605                         ha->flags.fce_enabled = 0;
1606                         goto try_eft;
1607                 }
1608                 ql_dbg(ql_dbg_init, vha, 0x00c0,
1609                     "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
1610
1611                 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1612                 ha->flags.fce_enabled = 1;
1613                 ha->fce_dma = tc_dma;
1614                 ha->fce = tc;
1615
1616 try_eft:
1617                 if (ha->eft)
1618                         dma_free_coherent(&ha->pdev->dev,
1619                             EFT_SIZE, ha->eft, ha->eft_dma);
1620
1621                 /* Allocate memory for Extended Trace Buffer. */
1622                 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1623                                          GFP_KERNEL);
1624                 if (!tc) {
1625                         ql_log(ql_log_warn, vha, 0x00c1,
1626                             "Unable to allocate (%d KB) for EFT.\n",
1627                             EFT_SIZE / 1024);
1628                         goto cont_alloc;
1629                 }
1630
1631                 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1632                 if (rval) {
1633                         ql_log(ql_log_warn, vha, 0x00c2,
1634                             "Unable to initialize EFT (%d).\n", rval);
1635                         dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1636                             tc_dma);
1637                         goto cont_alloc;
1638                 }
1639                 ql_dbg(ql_dbg_init, vha, 0x00c3,
1640                     "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1641
1642                 eft_size = EFT_SIZE;
1643                 ha->eft_dma = tc_dma;
1644                 ha->eft = tc;
1645         }
1646
1647 cont_alloc:
1648         if (IS_QLA27XX(ha)) {
1649                 if (!ha->fw_dump_template) {
1650                         ql_log(ql_log_warn, vha, 0x00ba,
1651                             "Failed - missing fwdump template.\n");
1652                         return;
1653                 }
1654                 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
1655                 ql_dbg(ql_dbg_init, vha, 0x00fa,
1656                     "-> allocating fwdump (%x bytes)...\n", dump_size);
1657                 goto allocate;
1658         }
1659
1660         req_q_size = req->length * sizeof(request_t);
1661         rsp_q_size = rsp->length * sizeof(response_t);
1662         dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1663         dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
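        /*
         * The multiqueue and FCE chains are appended after the core dump
         * image; chain_offset records where they begin.
         */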
1664         ha->chain_offset = dump_size;
1665         dump_size += mq_size + fce_size;
1666
1667 allocate:
1668         ha->fw_dump = vmalloc(dump_size);
1669         if (!ha->fw_dump) {
1670                 ql_log(ql_log_warn, vha, 0x00c4,
1671                     "Unable to allocate (%d KB) for firmware dump.\n",
1672                     dump_size / 1024);
1673
1674                 if (ha->fce) {
1675                         dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
1676                             ha->fce_dma);
1677                         ha->fce = NULL;
1678                         ha->fce_dma = 0;
1679                 }
1680
1681                 if (ha->eft) {
1682                         dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1683                             ha->eft_dma);
1684                         ha->eft = NULL;
1685                         ha->eft_dma = 0;
1686                 }
1687                 return;
1688         }
1689         ha->fw_dump_len = dump_size;
1690         ql_dbg(ql_dbg_init, vha, 0x00c5,
1691             "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1692
1693         if (IS_QLA27XX(ha))
1694                 return;
1695
1696         ha->fw_dump->signature[0] = 'Q';
1697         ha->fw_dump->signature[1] = 'L';
1698         ha->fw_dump->signature[2] = 'G';
1699         ha->fw_dump->signature[3] = 'C';
1700         ha->fw_dump->version = htonl(1);
1701
1702         ha->fw_dump->fixed_size = htonl(fixed_size);
1703         ha->fw_dump->mem_size = htonl(mem_size);
1704         ha->fw_dump->req_q_size = htonl(req_q_size);
1705         ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1706
1707         ha->fw_dump->eft_size = htonl(eft_size);
1708         ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1709         ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1710
1711         ha->fw_dump->header_size =
1712             htonl(offsetof(struct qla2xxx_fw_dump, isp));
1713 }
1714
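/*
 * qla81xx_mpi_sync() - Keep the MPI copy of the MPS bits in step with PCI
 * config space on ISP81xx parts (a no-op elsewhere).
 *
 * Takes the firmware semaphore at RAM word 0x7c00, compares the MPS_MASK
 * bits of PCI config word 0x54 with RISC RAM word 0x7a15, rewrites the
 * RAM word only when they differ, and releases the semaphore.
 */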
1715 static int
1716 qla81xx_mpi_sync(scsi_qla_host_t *vha)
1717 {
1718 #define MPS_MASK        0xe0
1719         int rval;
1720         uint16_t dc;
1721         uint32_t dw;
1722
1723         if (!IS_QLA81XX(vha->hw))
1724                 return QLA_SUCCESS;
1725
1726         rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1727         if (rval != QLA_SUCCESS) {
1728                 ql_log(ql_log_warn, vha, 0x0105,
1729                     "Unable to acquire semaphore.\n");
1730                 goto done;
1731         }
1732
1733         pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1734         rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1735         if (rval != QLA_SUCCESS) {
1736                 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1737                 goto done_release;
1738         }
1739
1740         dc &= MPS_MASK;
1741         if (dc == (dw & MPS_MASK))
1742                 goto done_release;
1743
1744         dw &= ~MPS_MASK;
1745         dw |= dc;
1746         rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1747         if (rval != QLA_SUCCESS) {
1748                 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1749         }
1750
1751 done_release:
1752         rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1753         if (rval != QLA_SUCCESS) {
1754                 ql_log(ql_log_warn, vha, 0x006d,
1755                     "Unable to release semaphore.\n");
1756         }
1757
1758 done:
1759         return rval;
1760 }
1761
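/*
 * qla2x00_alloc_outstanding_cmds() - Allocate the outstanding command
 * array for a request queue.
 *
 * On FWI2 adapters without multiqueue the array is sized from the smaller
 * of the firmware exchange (XCB) and IOCB counts; otherwise
 * DEFAULT_OUTSTANDING_COMMANDS is used.  If that allocation fails, a
 * MIN_OUTSTANDING_COMMANDS array is tried so initialization can proceed.
 */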
1762 int
1763 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
1764 {
1765         /* Don't try to reallocate the array */
1766         if (req->outstanding_cmds)
1767                 return QLA_SUCCESS;
1768
1769         if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
1770             (ql2xmultique_tag || ql2xmaxqueues > 1)))
1771                 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
1772         else {
1773                 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
1774                         req->num_outstanding_cmds = ha->cur_fw_xcb_count;
1775                 else
1776                         req->num_outstanding_cmds = ha->cur_fw_iocb_count;
1777         }
1778
1779         req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1780             req->num_outstanding_cmds, GFP_KERNEL);
1781
1782         if (!req->outstanding_cmds) {
1783                 /*
1784                  * Try to allocate a minimal size just so we can get through
1785                  * initialization.
1786                  */
1787                 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
1788                 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1789                     req->num_outstanding_cmds, GFP_KERNEL);
1790
1791                 if (!req->outstanding_cmds) {
1792                         ql_log(ql_log_fatal, NULL, 0x0126,
1793                             "Failed to allocate memory for "
1794                             "outstanding_cmds for req_que %p.\n", req);
1795                         req->num_outstanding_cmds = 0;
1796                         return QLA_FUNCTION_FAILED;
1797                 }
1798         }
1799
1800         return QLA_SUCCESS;
1801 }
1802
1803 /**
1804  * qla2x00_setup_chip() - Load and start RISC firmware.
1805  * @vha: HA context
1806  *
1807  * Returns 0 on success.
1808  */
1809 static int
1810 qla2x00_setup_chip(scsi_qla_host_t *vha)
1811 {
1812         int rval;
1813         uint32_t srisc_address = 0;
1814         struct qla_hw_data *ha = vha->hw;
1815         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1816         unsigned long flags;
1817         uint16_t fw_major_version;
1818
1819         if (IS_P3P_TYPE(ha)) {
1820                 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1821                 if (rval == QLA_SUCCESS) {
1822                         qla2x00_stop_firmware(vha);
1823                         goto enable_82xx_npiv;
1824                 } else
1825                         goto failed;
1826         }
1827
1828         if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1829                 /* Disable SRAM, Instruction RAM and GP RAM parity.  */
1830                 spin_lock_irqsave(&ha->hardware_lock, flags);
1831                 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1832                 RD_REG_WORD(&reg->hccr);
1833                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1834         }
1835
1836         qla81xx_mpi_sync(vha);
1837
1838         /* Load firmware sequences */
1839         rval = ha->isp_ops->load_risc(vha, &srisc_address);
1840         if (rval == QLA_SUCCESS) {
1841                 ql_dbg(ql_dbg_init, vha, 0x00c9,
1842                     "Verifying Checksum of loaded RISC code.\n");
1843
1844                 rval = qla2x00_verify_checksum(vha, srisc_address);
1845                 if (rval == QLA_SUCCESS) {
1846                         /* Start firmware execution. */
1847                         ql_dbg(ql_dbg_init, vha, 0x00ca,
1848                             "Starting firmware.\n");
1849
1850                         if (ql2xexlogins)
1851                                 ha->flags.exlogins_enabled = 1;
1852
1853                         if (ql2xexchoffld)
1854                                 ha->flags.exchoffld_enabled = 1;
1855
1856                         rval = qla2x00_execute_fw(vha, srisc_address);
1857                         /* Retrieve firmware information. */
1858                         if (rval == QLA_SUCCESS) {
1859                                 rval = qla2x00_set_exlogins_buffer(vha);
1860                                 if (rval != QLA_SUCCESS)
1861                                         goto failed;
1862
1863                                 rval = qla2x00_set_exchoffld_buffer(vha);
1864                                 if (rval != QLA_SUCCESS)
1865                                         goto failed;
1866
1867 enable_82xx_npiv:
1868                                 fw_major_version = ha->fw_major_version;
1869                                 if (IS_P3P_TYPE(ha))
1870                                         qla82xx_check_md_needed(vha);
1871                                 else
1872                                         rval = qla2x00_get_fw_version(vha);
1873                                 if (rval != QLA_SUCCESS)
1874                                         goto failed;
1875                                 ha->flags.npiv_supported = 0;
1876                                 if (IS_QLA2XXX_MIDTYPE(ha) &&
1877                                          (ha->fw_attributes & BIT_2)) {
1878                                         ha->flags.npiv_supported = 1;
1879                                         if ((!ha->max_npiv_vports) ||
1880                                             ((ha->max_npiv_vports + 1) %
1881                                             MIN_MULTI_ID_FABRIC))
1882                                                 ha->max_npiv_vports =
1883                                                     MIN_MULTI_ID_FABRIC - 1;
1884                                 }
1885                                 qla2x00_get_resource_cnts(vha);
1886
1887                                 /*
1888                                  * Allocate the array of outstanding commands
1889                                  * now that we know the firmware resources.
1890                                  */
1891                                 rval = qla2x00_alloc_outstanding_cmds(ha,
1892                                     vha->req);
1893                                 if (rval != QLA_SUCCESS)
1894                                         goto failed;
1895
1896                                 if (!fw_major_version && ql2xallocfwdump
1897                                     && !(IS_P3P_TYPE(ha)))
1898                                         qla2x00_alloc_fw_dump(vha);
1899                         } else {
1900                                 goto failed;
1901                         }
1902                 } else {
1903                         ql_log(ql_log_fatal, vha, 0x00cd,
1904                             "ISP Firmware failed checksum.\n");
1905                         goto failed;
1906                 }
1907         } else
1908                 goto failed;
1909
1910         if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1911                 /* Enable proper parity. */
1912                 spin_lock_irqsave(&ha->hardware_lock, flags);
1913                 if (IS_QLA2300(ha))
1914                         /* SRAM parity */
1915                         WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1916                 else
1917                         /* SRAM, Instruction RAM and GP RAM parity */
1918                         WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1919                 RD_REG_WORD(&reg->hccr);
1920                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1921         }
1922
1923         if (IS_QLA27XX(ha))
1924                 ha->flags.fac_supported = 1;
1925         else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1926                 uint32_t size;
1927
1928                 rval = qla81xx_fac_get_sector_size(vha, &size);
1929                 if (rval == QLA_SUCCESS) {
1930                         ha->flags.fac_supported = 1;
1931                         ha->fdt_block_size = size << 2;
1932                 } else {
1933                         ql_log(ql_log_warn, vha, 0x00ce,
1934                             "Unsupported FAC firmware (%d.%02d.%02d).\n",
1935                             ha->fw_major_version, ha->fw_minor_version,
1936                             ha->fw_subminor_version);
1937
1938                         if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
1939                                 ha->flags.fac_supported = 0;
1940                                 rval = QLA_SUCCESS;
1941                         }
1942                 }
1943         }
1944 failed:
1945         if (rval) {
1946                 ql_log(ql_log_fatal, vha, 0x00cf,
1947                     "Setup chip **** FAILED ****.\n");
1948         }
1949
1950         return (rval);
1951 }
1952
1953 /**
1954  * qla2x00_init_response_q_entries() - Initializes response queue entries.
1955  * @rsp: response queue
1956  *
1957  * Resets the ring pointer, index and status SRB, then marks every
1958  * entry in the response ring as processed so stale entries are not
1959  * handled after the queue is (re)initialized.
1960  */
1962 void
1963 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1964 {
1965         uint16_t cnt;
1966         response_t *pkt;
1967
1968         rsp->ring_ptr = rsp->ring;
1969         rsp->ring_index    = 0;
1970         rsp->status_srb = NULL;
1971         pkt = rsp->ring_ptr;
1972         for (cnt = 0; cnt < rsp->length; cnt++) {
1973                 pkt->signature = RESPONSE_PROCESSED;
1974                 pkt++;
1975         }
1976 }
1977
1978 /**
1979  * qla2x00_update_fw_options() - Read and process firmware options.
1980  * @vha: HA context
1981  *
1982  * Reads, adjusts and rewrites the ISP2xxx firmware options.
1983  */
1984 void
1985 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1986 {
1987         uint16_t swing, emphasis, tx_sens, rx_sens;
1988         struct qla_hw_data *ha = vha->hw;
1989
1990         memset(ha->fw_options, 0, sizeof(ha->fw_options));
1991         qla2x00_get_fw_options(vha, ha->fw_options);
1992
1993         if (IS_QLA2100(ha) || IS_QLA2200(ha))
1994                 return;
1995
1996         /* Serial Link options. */
1997         ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1998             "Serial link options.\n");
1999         ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
2000             (uint8_t *)&ha->fw_seriallink_options,
2001             sizeof(ha->fw_seriallink_options));
2002
2003         ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
2004         if (ha->fw_seriallink_options[3] & BIT_2) {
2005                 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
2006
2007                 /*  1G settings */
2008                 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
2009                 emphasis = (ha->fw_seriallink_options[2] &
2010                     (BIT_4 | BIT_3)) >> 3;
2011                 tx_sens = ha->fw_seriallink_options[0] &
2012                     (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2013                 rx_sens = (ha->fw_seriallink_options[0] &
2014                     (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2015                 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
2016                 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2017                         if (rx_sens == 0x0)
2018                                 rx_sens = 0x3;
2019                         ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
2020                 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2021                         ha->fw_options[10] |= BIT_5 |
2022                             ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2023                             (tx_sens & (BIT_1 | BIT_0));
2024
2025                 /*  2G settings */
2026                 swing = (ha->fw_seriallink_options[2] &
2027                     (BIT_7 | BIT_6 | BIT_5)) >> 5;
2028                 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
2029                 tx_sens = ha->fw_seriallink_options[1] &
2030                     (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2031                 rx_sens = (ha->fw_seriallink_options[1] &
2032                     (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2033                 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
2034                 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2035                         if (rx_sens == 0x0)
2036                                 rx_sens = 0x3;
2037                         ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
2038                 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2039                         ha->fw_options[11] |= BIT_5 |
2040                             ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2041                             (tx_sens & (BIT_1 | BIT_0));
2042         }
2043
2044         /* FCP2 options. */
2045         /*  Return command IOCBs without waiting for an ABTS to complete. */
2046         ha->fw_options[3] |= BIT_13;
2047
2048         /* LED scheme. */
2049         if (ha->flags.enable_led_scheme)
2050                 ha->fw_options[2] |= BIT_12;
2051
2052         /* Detect ISP6312. */
2053         if (IS_QLA6312(ha))
2054                 ha->fw_options[2] |= BIT_13;
2055
2056         /* Update firmware options. */
2057         qla2x00_set_fw_options(vha, ha->fw_options);
2058 }
2059
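/*
 * qla24xx_update_fw_options() - FWI2 variant of the firmware option
 * update.  Optionally holds status IOCBs until the ABTS response is
 * received (ql2xfwholdabts) and, when BIT_0 of the NVRAM serial link
 * options is set, pushes the serdes settings to the firmware via
 * qla2x00_set_serdes_params().
 */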
2060 void
2061 qla24xx_update_fw_options(scsi_qla_host_t *vha)
2062 {
2063         int rval;
2064         struct qla_hw_data *ha = vha->hw;
2065
2066         if (IS_P3P_TYPE(ha))
2067                 return;
2068
2069         /*  Hold status IOCBs until ABTS response received. */
2070         if (ql2xfwholdabts)
2071                 ha->fw_options[3] |= BIT_12;
2072
2073         /* Update Serial Link options. */
2074         if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
2075                 return;
2076
2077         rval = qla2x00_set_serdes_params(vha,
2078             le16_to_cpu(ha->fw_seriallink_options24[1]),
2079             le16_to_cpu(ha->fw_seriallink_options24[2]),
2080             le16_to_cpu(ha->fw_seriallink_options24[3]));
2081         if (rval != QLA_SUCCESS) {
2082                 ql_log(ql_log_warn, vha, 0x0104,
2083                     "Unable to update Serial Link options (%x).\n", rval);
2084         }
2085 }
2086
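/*
 * qla2x00_config_rings() - Program the request/response ring lengths and
 * DMA addresses into the legacy initialization control block and zero the
 * ring in/out pointer registers, reading one back for PCI posting.
 */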
2087 void
2088 qla2x00_config_rings(struct scsi_qla_host *vha)
2089 {
2090         struct qla_hw_data *ha = vha->hw;
2091         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2092         struct req_que *req = ha->req_q_map[0];
2093         struct rsp_que *rsp = ha->rsp_q_map[0];
2094
2095         /* Setup ring parameters in initialization control block. */
2096         ha->init_cb->request_q_outpointer = cpu_to_le16(0);
2097         ha->init_cb->response_q_inpointer = cpu_to_le16(0);
2098         ha->init_cb->request_q_length = cpu_to_le16(req->length);
2099         ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
2100         ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
2101         ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
2102         ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
2103         ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
2104
2105         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
2106         WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
2107         WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
2108         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
2109         RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));            /* PCI Posting. */
2110 }
2111
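/*
 * qla24xx_config_rings() - FWI2 counterpart of the above: fills the 24xx
 * init control block with ring and ATIO queue DMA addresses, enables
 * shadow registers and MSI-X handshake options where supported, zeroes
 * the queue pointer registers for either the multiqueue or the base
 * ISP24xx register block, and lets the target code adjust the rings
 * before a final HCCR read for PCI posting.
 */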
2112 void
2113 qla24xx_config_rings(struct scsi_qla_host *vha)
2114 {
2115         struct qla_hw_data *ha = vha->hw;
2116         device_reg_t *reg = ISP_QUE_REG(ha, 0);
2117         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
2118         struct qla_msix_entry *msix;
2119         struct init_cb_24xx *icb;
2120         uint16_t rid = 0;
2121         struct req_que *req = ha->req_q_map[0];
2122         struct rsp_que *rsp = ha->rsp_q_map[0];
2123
2124         /* Setup ring parameters in initialization control block. */
2125         icb = (struct init_cb_24xx *)ha->init_cb;
2126         icb->request_q_outpointer = cpu_to_le16(0);
2127         icb->response_q_inpointer = cpu_to_le16(0);
2128         icb->request_q_length = cpu_to_le16(req->length);
2129         icb->response_q_length = cpu_to_le16(rsp->length);
2130         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
2131         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
2132         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
2133         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
2134
2135         /* Setup ATIO queue dma pointers for target mode */
2136         icb->atio_q_inpointer = cpu_to_le16(0);
2137         icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
2138         icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
2139         icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
2140
2141         if (IS_SHADOW_REG_CAPABLE(ha))
2142                 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
2143
2144         if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2145                 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
2146                 icb->rid = cpu_to_le16(rid);
2147                 if (ha->flags.msix_enabled) {
2148                         msix = &ha->msix_entries[1];
2149                         ql_dbg(ql_dbg_init, vha, 0x00fd,
2150                             "Registering vector 0x%x for base que.\n",
2151                             msix->entry);
2152                         icb->msix = cpu_to_le16(msix->entry);
2153                 }
2154                 /* Use alternate PCI bus number */
2155                 if (MSB(rid))
2156                         icb->firmware_options_2 |= cpu_to_le32(BIT_19);
2157                 /* Use alternate PCI devfn */
2158                 if (LSB(rid))
2159                         icb->firmware_options_2 |= cpu_to_le32(BIT_18);
2160
2161                 /* Use Disable MSIX Handshake mode for capable adapters */
2162                 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
2163                     (ha->flags.msix_enabled)) {
2164                         icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
2165                         ha->flags.disable_msix_handshake = 1;
2166                         ql_dbg(ql_dbg_init, vha, 0x00fe,
2167                             "MSIX Handshake Disable Mode turned on.\n");
2168                 } else {
2169                         icb->firmware_options_2 |= cpu_to_le32(BIT_22);
2170                 }
2171                 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
2172
2173                 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
2174                 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
2175                 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
2176                 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
2177         } else {
2178                 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
2179                 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
2180                 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
2181                 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
2182         }
2183         qlt_24xx_config_rings(vha);
2184
2185         /* PCI posting */
2186         RD_REG_DWORD(&ioreg->hccr);
2187 }
2188
2189 /**
2190  * qla2x00_init_rings() - Initializes firmware.
2191  * @vha: HA context
2192  *
2193  * Beginning of request ring has initialization control block already built
2194  * by nvram config routine.
2195  *
2196  * Returns 0 on success.
2197  */
2198 int
2199 qla2x00_init_rings(scsi_qla_host_t *vha)
2200 {
2201         int     rval;
2202         unsigned long flags = 0;
2203         int cnt, que;
2204         struct qla_hw_data *ha = vha->hw;
2205         struct req_que *req;
2206         struct rsp_que *rsp;
2207         struct mid_init_cb_24xx *mid_init_cb =
2208             (struct mid_init_cb_24xx *) ha->init_cb;
2209
2210         spin_lock_irqsave(&ha->hardware_lock, flags);
2211
2212         /* Clear outstanding commands array. */
2213         for (que = 0; que < ha->max_req_queues; que++) {
2214                 req = ha->req_q_map[que];
2215                 if (!req || !test_bit(que, ha->req_qid_map))
2216                         continue;
2217                 req->out_ptr = (void *)(req->ring + req->length);
2218                 *req->out_ptr = 0;
2219                 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
2220                         req->outstanding_cmds[cnt] = NULL;
2221
2222                 req->current_outstanding_cmd = 1;
2223
2224                 /* Initialize firmware. */
2225                 req->ring_ptr  = req->ring;
2226                 req->ring_index    = 0;
2227                 req->cnt      = req->length;
2228         }
2229
2230         for (que = 0; que < ha->max_rsp_queues; que++) {
2231                 rsp = ha->rsp_q_map[que];
2232                 if (!rsp || !test_bit(que, ha->rsp_qid_map))
2233                         continue;
2234                 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2235                 *rsp->in_ptr = 0;
2236                 /* Initialize response queue entries */
2237                 if (IS_QLAFX00(ha))
2238                         qlafx00_init_response_q_entries(rsp);
2239                 else
2240                         qla2x00_init_response_q_entries(rsp);
2241         }
2242
2243         ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
2244         ha->tgt.atio_ring_index = 0;
2245         /* Initialize ATIO queue entries */
2246         qlt_init_atio_q_entries(vha);
2247
2248         ha->isp_ops->config_rings(vha);
2249
2250         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2251
2252         ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
2253
2254         if (IS_QLAFX00(ha)) {
2255                 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
2256                 goto next_check;
2257         }
2258
2259         /* Update any ISP specific firmware options before initialization. */
2260         ha->isp_ops->update_fw_options(vha);
2261
2262         if (ha->flags.npiv_supported) {
2263                 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
2264                         ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
2265                 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
2266         }
2267
2268         if (IS_FWI2_CAPABLE(ha)) {
2269                 mid_init_cb->options = cpu_to_le16(BIT_1);
2270                 mid_init_cb->init_cb.execution_throttle =
2271                     cpu_to_le16(ha->cur_fw_xcb_count);
2272                 /* D-Port Status */
2273                 if (IS_DPORT_CAPABLE(ha))
2274                         mid_init_cb->init_cb.firmware_options_1 |=
2275                             cpu_to_le16(BIT_7);
2276                 /* Enable FA-WWPN */
2277                 ha->flags.fawwpn_enabled =
2278                     (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
2279                 ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
2280                     (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
2281         }
2282
2283         rval = qla2x00_init_firmware(vha, ha->init_cb_size);
2284 next_check:
2285         if (rval) {
2286                 ql_log(ql_log_fatal, vha, 0x00d2,
2287                     "Init Firmware **** FAILED ****.\n");
2288         } else {
2289                 ql_dbg(ql_dbg_init, vha, 0x00d3,
2290                     "Init Firmware -- success.\n");
2291         }
2292
2293         return (rval);
2294 }
2295
2296 /**
2297  * qla2x00_fw_ready() - Waits for firmware ready.
2298  * @vha: HA context
2299  *
2300  * Returns 0 on success.
2301  */
2302 static int
2303 qla2x00_fw_ready(scsi_qla_host_t *vha)
2304 {
2305         int             rval;
2306         unsigned long   wtime, mtime, cs84xx_time;
2307         uint16_t        min_wait;       /* Minimum wait time if loop is down */
2308         uint16_t        wait_time;      /* Wait time if loop is coming ready */
2309         uint16_t        state[6];
2310         struct qla_hw_data *ha = vha->hw;
2311
2312         if (IS_QLAFX00(vha->hw))
2313                 return qlafx00_fw_ready(vha);
2314
2315         rval = QLA_SUCCESS;
2316
2317         /* Time to wait for loop down */
2318         if (IS_P3P_TYPE(ha))
2319                 min_wait = 30;
2320         else
2321                 min_wait = 20;
2322
2323         /*
2324          * Firmware should take at most one RATOV to login, plus 5 seconds for
2325          * our own processing.
2326          */
2327         if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
2328                 wait_time = min_wait;
2329         }
2330
2331         /* Min wait time if loop down */
2332         mtime = jiffies + (min_wait * HZ);
2333
2334         /* wait time before firmware ready */
2335         wtime = jiffies + (wait_time * HZ);
2336
2337         /* Wait for ISP to finish LIP */
2338         if (!vha->flags.init_done)
2339                 ql_log(ql_log_info, vha, 0x801e,
2340                     "Waiting for LIP to complete.\n");
2341
2342         do {
2343                 memset(state, -1, sizeof(state));
2344                 rval = qla2x00_get_firmware_state(vha, state);
2345                 if (rval == QLA_SUCCESS) {
2346                         if (state[0] < FSTATE_LOSS_OF_SYNC) {
2347                                 vha->device_flags &= ~DFLG_NO_CABLE;
2348                         }
2349                         if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
2350                                 ql_dbg(ql_dbg_taskm, vha, 0x801f,
2351                                     "fw_state=%x 84xx=%x.\n", state[0],
2352                                     state[2]);
2353                                 if ((state[2] & FSTATE_LOGGED_IN) &&
2354                                      (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
2355                                         ql_dbg(ql_dbg_taskm, vha, 0x8028,
2356                                             "Sending verify iocb.\n");
2357
2358                                         cs84xx_time = jiffies;
2359                                         rval = qla84xx_init_chip(vha);
2360                                         if (rval != QLA_SUCCESS) {
2361                                                 ql_log(ql_log_warn,
2362                                                     vha, 0x8007,
2363                                                     "Init chip failed.\n");
2364                                                 break;
2365                                         }
2366
2367                                         /* Add time taken to initialize. */
2368                                         cs84xx_time = jiffies - cs84xx_time;
2369                                         wtime += cs84xx_time;
2370                                         mtime += cs84xx_time;
2371                                         ql_dbg(ql_dbg_taskm, vha, 0x8008,
2372                                             "Increasing wait time by %ld. "
2373                                             "New time %ld.\n", cs84xx_time,
2374                                             wtime);
2375                                 }
2376                         } else if (state[0] == FSTATE_READY) {
2377                                 ql_dbg(ql_dbg_taskm, vha, 0x8037,
2378                                     "F/W Ready - OK.\n");
2379
2380                                 qla2x00_get_retry_cnt(vha, &ha->retry_count,
2381                                     &ha->login_timeout, &ha->r_a_tov);
2382
2383                                 rval = QLA_SUCCESS;
2384                                 break;
2385                         }
2386
2387                         rval = QLA_FUNCTION_FAILED;
2388
2389                         if (atomic_read(&vha->loop_down_timer) &&
2390                             state[0] != FSTATE_READY) {
2391                                 /* Loop down. Timeout on min_wait for states
2392                                  * other than Wait for Login.
2393                                  */
2394                                 if (time_after_eq(jiffies, mtime)) {
2395                                         ql_log(ql_log_info, vha, 0x8038,
2396                                             "Cable is unplugged...\n");
2397
2398                                         vha->device_flags |= DFLG_NO_CABLE;
2399                                         break;
2400                                 }
2401                         }
2402                 } else {
2403                         /* Mailbox cmd failed. Timeout on min_wait. */
2404                         if (time_after_eq(jiffies, mtime) ||
2405                                 ha->flags.isp82xx_fw_hung)
2406                                 break;
2407                 }
2408
2409                 if (time_after_eq(jiffies, wtime))
2410                         break;
2411
2412                 /* Delay for a while */
2413                 msleep(500);
2414         } while (1);
2415
2416         ql_dbg(ql_dbg_taskm, vha, 0x803a,
2417             "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
2418             state[1], state[2], state[3], state[4], state[5], jiffies);
2419
2420         if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
2421                 ql_log(ql_log_warn, vha, 0x803b,
2422                     "Firmware ready **** FAILED ****.\n");
2423         }
2424
2425         return (rval);
2426 }
2427
2428 /*
2429 *  qla2x00_configure_hba
2430 *      Setup adapter context.
2431 *
2432 * Input:
2433 *      vha = adapter state pointer.
2434 *
2435 * Returns:
2436 *      0 = success
2437 *
2438 * Context:
2439 *      Kernel context.
2440 */
2441 static int
2442 qla2x00_configure_hba(scsi_qla_host_t *vha)
2443 {
2444         int       rval;
2445         uint16_t      loop_id;
2446         uint16_t      topo;
2447         uint16_t      sw_cap;
2448         uint8_t       al_pa;
2449         uint8_t       area;
2450         uint8_t       domain;
2451         char            connect_type[22];
2452         struct qla_hw_data *ha = vha->hw;
2453         unsigned long flags;
2454         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
2455
2456         /* Get host addresses. */
2457         rval = qla2x00_get_adapter_id(vha,
2458             &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2459         if (rval != QLA_SUCCESS) {
2460                 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2461                     IS_CNA_CAPABLE(ha) ||
2462                     (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2463                         ql_dbg(ql_dbg_disc, vha, 0x2008,
2464                             "Loop is in a transition state.\n");
2465                 } else {
2466                         ql_log(ql_log_warn, vha, 0x2009,
2467                             "Unable to get host loop ID.\n");
2468                         if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
2469                             (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
2470                                 ql_log(ql_log_warn, vha, 0x1151,
2471                                     "Doing link init.\n");
2472                                 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
2473                                         return rval;
2474                         }
2475                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2476                 }
2477                 return (rval);
2478         }
2479
2480         if (topo == 4) {
2481                 ql_log(ql_log_info, vha, 0x200a,
2482                     "Cannot get topology - retrying.\n");
2483                 return (QLA_FUNCTION_FAILED);
2484         }
2485
2486         vha->loop_id = loop_id;
2487
2488         /* initialize */
2489         ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2490         ha->operating_mode = LOOP;
2491         ha->switch_cap = 0;
2492
2493         switch (topo) {
2494         case 0:
2495                 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2496                 ha->current_topology = ISP_CFG_NL;
2497                 strcpy(connect_type, "(Loop)");
2498                 break;
2499
2500         case 1:
2501                 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2502                 ha->switch_cap = sw_cap;
2503                 ha->current_topology = ISP_CFG_FL;
2504                 strcpy(connect_type, "(FL_Port)");
2505                 break;
2506
2507         case 2:
2508                 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2509                 ha->operating_mode = P2P;
2510                 ha->current_topology = ISP_CFG_N;
2511                 strcpy(connect_type, "(N_Port-to-N_Port)");
2512                 break;
2513
2514         case 3:
2515                 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2516                 ha->switch_cap = sw_cap;
2517                 ha->operating_mode = P2P;
2518                 ha->current_topology = ISP_CFG_F;
2519                 strcpy(connect_type, "(F_Port)");
2520                 break;
2521
2522         default:
2523                 ql_dbg(ql_dbg_disc, vha, 0x200f,
2524                     "HBA in unknown topology %x, using NL.\n", topo);
2525                 ha->current_topology = ISP_CFG_NL;
2526                 strcpy(connect_type, "(Loop)");
2527                 break;
2528         }
2529
2530         /* Save Host port and loop ID. */
2531         /* byte order - Big Endian */
2532         vha->d_id.b.domain = domain;
2533         vha->d_id.b.area = area;
2534         vha->d_id.b.al_pa = al_pa;
2535
2536         spin_lock_irqsave(&ha->vport_slock, flags);
2537         qlt_update_vp_map(vha, SET_AL_PA);
2538         spin_unlock_irqrestore(&ha->vport_slock, flags);
2539
2540         if (!vha->flags.init_done)
2541                 ql_log(ql_log_info, vha, 0x2010,
2542                     "Topology - %s, Host Loop address 0x%x.\n",
2543                     connect_type, vha->loop_id);
2544
2545         return(rval);
2546 }
2547
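/*
 * qla2x00_set_model_info() - Derive the adapter model number/description.
 *
 * Uses the NVRAM-supplied model string when one is present (trailing
 * spaces and NULs stripped), otherwise falls back to the PCI subsystem ID
 * lookup table or the supplied default, and finally lets the VPD product
 * identifier override the description on FWI2 adapters.
 */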
2548 inline void
2549 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2550         char *def)
2551 {
2552         char *st, *en;
2553         uint16_t index;
2554         struct qla_hw_data *ha = vha->hw;
2555         int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2556             !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
2557
2558         if (memcmp(model, BINZERO, len) != 0) {
2559                 strncpy(ha->model_number, model, len);
2560                 st = en = ha->model_number;
2561                 en += len - 1;
2562                 while (en > st) {
2563                         if (*en != 0x20 && *en != 0x00)
2564                                 break;
2565                         *en-- = '\0';
2566                 }
2567
2568                 index = (ha->pdev->subsystem_device & 0xff);
2569                 if (use_tbl &&
2570                     ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2571                     index < QLA_MODEL_NAMES)
2572                         strncpy(ha->model_desc,
2573                             qla2x00_model_name[index * 2 + 1],
2574                             sizeof(ha->model_desc) - 1);
2575         } else {
2576                 index = (ha->pdev->subsystem_device & 0xff);
2577                 if (use_tbl &&
2578                     ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2579                     index < QLA_MODEL_NAMES) {
2580                         strcpy(ha->model_number,
2581                             qla2x00_model_name[index * 2]);
2582                         strncpy(ha->model_desc,
2583                             qla2x00_model_name[index * 2 + 1],
2584                             sizeof(ha->model_desc) - 1);
2585                 } else {
2586                         strcpy(ha->model_number, def);
2587                 }
2588         }
2589         if (IS_FWI2_CAPABLE(ha))
2590                 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2591                     sizeof(ha->model_desc));
2592 }
2593
2594 /* On sparc systems, obtain port and node WWN from firmware
2595  * properties.
2596  */
2597 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2598 {
2599 #ifdef CONFIG_SPARC
2600         struct qla_hw_data *ha = vha->hw;
2601         struct pci_dev *pdev = ha->pdev;
2602         struct device_node *dp = pci_device_to_OF_node(pdev);
2603         const u8 *val;
2604         int len;
2605
2606         val = of_get_property(dp, "port-wwn", &len);
2607         if (val && len >= WWN_SIZE)
2608                 memcpy(nv->port_name, val, WWN_SIZE);
2609
2610         val = of_get_property(dp, "node-wwn", &len);
2611         if (val && len >= WWN_SIZE)
2612                 memcpy(nv->node_name, val, WWN_SIZE);
2613 #endif
2614 }
2615
2616 /*
2617 * NVRAM configuration for ISP 2xxx
2618 *
2619 * Input:
2620 *      vha               = adapter block pointer.
2621 *
2622 * Output:
2623 *      initialization control block in response_ring
2624 *      host adapters parameters in host adapter block
2625 *
2626 * Returns:
2627 *      0 = success.
2628 */
2629 int
2630 qla2x00_nvram_config(scsi_qla_host_t *vha)
2631 {
2632         int             rval;
2633         uint8_t         chksum = 0;
2634         uint16_t        cnt;
2635         uint8_t         *dptr1, *dptr2;
2636         struct qla_hw_data *ha = vha->hw;
2637         init_cb_t       *icb = ha->init_cb;
2638         nvram_t         *nv = ha->nvram;
2639         uint8_t         *ptr = ha->nvram;
2640         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2641
2642         rval = QLA_SUCCESS;
2643
2644         /* Determine NVRAM starting address. */
2645         ha->nvram_size = sizeof(nvram_t);
2646         ha->nvram_base = 0;
2647         if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2648                 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2649                         ha->nvram_base = 0x80;
2650
2651         /* Get NVRAM data and calculate checksum. */
2652         ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2653         for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2654                 chksum += *ptr++;
2655
2656         ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2657             "Contents of NVRAM.\n");
2658         ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2659             (uint8_t *)nv, ha->nvram_size);
2660
2661         /* Bad NVRAM data, set defaults parameters. */
2662         if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2663             nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2664                 /* Reset NVRAM data. */
2665                 ql_log(ql_log_warn, vha, 0x0064,
2666                     "Inconsistent NVRAM "
2667                     "detected: checksum=0x%x id=%c version=0x%x.\n",
2668                     chksum, nv->id[0], nv->nvram_version);
2669                 ql_log(ql_log_warn, vha, 0x0065,
2670                     "Falling back to "
2671                     "functioning (yet invalid -- WWPN) defaults.\n");
2672
2673                 /*
2674                  * Set default initialization control block.
2675                  */
2676                 memset(nv, 0, ha->nvram_size);
2677                 nv->parameter_block_version = ICB_VERSION;
2678
2679                 if (IS_QLA23XX(ha)) {
2680                         nv->firmware_options[0] = BIT_2 | BIT_1;
2681                         nv->firmware_options[1] = BIT_7 | BIT_5;
2682                         nv->add_firmware_options[0] = BIT_5;
2683                         nv->add_firmware_options[1] = BIT_5 | BIT_4;
2684                         nv->frame_payload_size = 2048;
2685                         nv->special_options[1] = BIT_7;
2686                 } else if (IS_QLA2200(ha)) {
2687                         nv->firmware_options[0] = BIT_2 | BIT_1;
2688                         nv->firmware_options[1] = BIT_7 | BIT_5;
2689                         nv->add_firmware_options[0] = BIT_5;
2690                         nv->add_firmware_options[1] = BIT_5 | BIT_4;
2691                         nv->frame_payload_size = 1024;
2692                 } else if (IS_QLA2100(ha)) {
2693                         nv->firmware_options[0] = BIT_3 | BIT_1;
2694                         nv->firmware_options[1] = BIT_5;
2695                         nv->frame_payload_size = 1024;
2696                 }
2697
2698                 nv->max_iocb_allocation = cpu_to_le16(256);
2699                 nv->execution_throttle = cpu_to_le16(16);
2700                 nv->retry_count = 8;
2701                 nv->retry_delay = 1;
2702
2703                 nv->port_name[0] = 33;
2704                 nv->port_name[3] = 224;
2705                 nv->port_name[4] = 139;
2706
2707                 qla2xxx_nvram_wwn_from_ofw(vha, nv);
2708
2709                 nv->login_timeout = 4;
2710
2711                 /*
2712                  * Set default host adapter parameters
2713                  */
2714                 nv->host_p[1] = BIT_2;
2715                 nv->reset_delay = 5;
2716                 nv->port_down_retry_count = 8;
2717                 nv->max_luns_per_target = cpu_to_le16(8);
2718                 nv->link_down_timeout = 60;
2719
2720                 rval = 1;
2721         }
2722
2723 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2724         /*
2725          * The SN2 does not provide BIOS emulation which means you can't change
2726          * potentially bogus BIOS settings. Force the use of default settings
2727          * for link rate and frame size.  Hope that the rest of the settings
2728          * are valid.
2729          */
2730         if (ia64_platform_is("sn2")) {
2731                 nv->frame_payload_size = 2048;
2732                 if (IS_QLA23XX(ha))
2733                         nv->special_options[1] = BIT_7;
2734         }
2735 #endif
2736
2737         /* Reset Initialization control block */
2738         memset(icb, 0, ha->init_cb_size);
2739
2740         /*
2741          * Setup driver NVRAM options.
2742          */
2743         nv->firmware_options[0] |= (BIT_6 | BIT_1);
2744         nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2745         nv->firmware_options[1] |= (BIT_5 | BIT_0);
2746         nv->firmware_options[1] &= ~BIT_4;
2747
2748         if (IS_QLA23XX(ha)) {
2749                 nv->firmware_options[0] |= BIT_2;
2750                 nv->firmware_options[0] &= ~BIT_3;
2751                 nv->special_options[0] &= ~BIT_6;
2752                 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2753
2754                 if (IS_QLA2300(ha)) {
2755                         if (ha->fb_rev == FPM_2310) {
2756                                 strcpy(ha->model_number, "QLA2310");
2757                         } else {
2758                                 strcpy(ha->model_number, "QLA2300");
2759                         }
2760                 } else {
2761                         qla2x00_set_model_info(vha, nv->model_number,
2762                             sizeof(nv->model_number), "QLA23xx");
2763                 }
2764         } else if (IS_QLA2200(ha)) {
2765                 nv->firmware_options[0] |= BIT_2;
2766                 /*
2767                  * 'Point-to-point preferred, else loop' is not a safe
2768                  * connection mode setting.
2769                  */
2770                 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2771                     (BIT_5 | BIT_4)) {
2772                         /* Force 'loop preferred, else point-to-point'. */
2773                         nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2774                         nv->add_firmware_options[0] |= BIT_5;
2775                 }
2776                 strcpy(ha->model_number, "QLA22xx");
2777         } else /*if (IS_QLA2100(ha))*/ {
2778                 strcpy(ha->model_number, "QLA2100");
2779         }
2780
2781         /*
2782          * Copy over NVRAM RISC parameter block to initialization control block.
2783          */
2784         dptr1 = (uint8_t *)icb;
2785         dptr2 = (uint8_t *)&nv->parameter_block_version;
2786         cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2787         while (cnt--)
2788                 *dptr1++ = *dptr2++;
2789
2790         /* Copy 2nd half. */
2791         dptr1 = (uint8_t *)icb->add_firmware_options;
2792         cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2793         while (cnt--)
2794                 *dptr1++ = *dptr2++;
2795
2796         /* Use alternate WWN? */
2797         if (nv->host_p[1] & BIT_7) {
2798                 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2799                 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2800         }
2801
2802         /* Prepare nodename */
2803         if ((icb->firmware_options[1] & BIT_6) == 0) {
2804                 /*
2805                  * Firmware will apply the following mask if the nodename was
2806                  * not provided.
2807                  */
2808                 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2809                 icb->node_name[0] &= 0xF0;
2810         }
2811
2812         /*
2813          * Set host adapter parameters.
2814          */
2815
2816         /*
2817          * BIT_7 in the host-parameters section allows for modification to
2818          * internal driver logging.
2819          */
2820         if (nv->host_p[0] & BIT_7)
2821                 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2822         ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2823         /* Always load RISC code on non ISP2[12]00 chips. */
2824         if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2825                 ha->flags.disable_risc_code_load = 0;
2826         ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2827         ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2828         ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2829         ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2830         ha->flags.disable_serdes = 0;
2831
2832         ha->operating_mode =
2833             (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2834
2835         memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2836             sizeof(ha->fw_seriallink_options));
2837
2838         /* save HBA serial number */
2839         ha->serial0 = icb->port_name[5];
2840         ha->serial1 = icb->port_name[6];
2841         ha->serial2 = icb->port_name[7];
2842         memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2843         memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2844
2845         icb->execution_throttle = cpu_to_le16(0xFFFF);
2846
2847         ha->retry_count = nv->retry_count;
2848
2849         /* Set minimum login_timeout to 4 seconds. */
2850         if (nv->login_timeout != ql2xlogintimeout)
2851                 nv->login_timeout = ql2xlogintimeout;
2852         if (nv->login_timeout < 4)
2853                 nv->login_timeout = 4;
2854         ha->login_timeout = nv->login_timeout;
2855
2856         /* Set minimum RATOV to 100 tenths of a second. */
2857         ha->r_a_tov = 100;
2858
2859         ha->loop_reset_delay = nv->reset_delay;
2860
2861         /* Link Down Timeout = 0:
2862          *
2863          *      When Port Down timer expires we will start returning
2864          *      I/O's to OS with "DID_NO_CONNECT".
2865          *
2866          * Link Down Timeout != 0:
2867          *
2868          *       The driver waits for the link to come up after link down
2869          *       before returning I/Os to OS with "DID_NO_CONNECT".
2870          */
2871         if (nv->link_down_timeout == 0) {
2872                 ha->loop_down_abort_time =
2873                     (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2874         } else {
2875                 ha->link_down_timeout =  nv->link_down_timeout;
2876                 ha->loop_down_abort_time =
2877                     (LOOP_DOWN_TIME - ha->link_down_timeout);
2878         }
2879
2880         /*
2881          * Need enough time to try and get the port back.
2882          */
2883         ha->port_down_retry_count = nv->port_down_retry_count;
2884         if (qlport_down_retry)
2885                 ha->port_down_retry_count = qlport_down_retry;
2886         /* Set login_retry_count */
2887         ha->login_retry_count = nv->retry_count;
2888         if (ha->port_down_retry_count == nv->port_down_retry_count &&
2889             ha->port_down_retry_count > 3)
2890                 ha->login_retry_count = ha->port_down_retry_count;
2891         else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2892                 ha->login_retry_count = ha->port_down_retry_count;
2893         if (ql2xloginretrycount)
2894                 ha->login_retry_count = ql2xloginretrycount;
2895
2896         icb->lun_enables = cpu_to_le16(0);
2897         icb->command_resource_count = 0;
2898         icb->immediate_notify_resource_count = 0;
2899         icb->timeout = cpu_to_le16(0);
2900
2901         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2902                 /* Enable RIO */
2903                 icb->firmware_options[0] &= ~BIT_3;
2904                 icb->add_firmware_options[0] &=
2905                     ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2906                 icb->add_firmware_options[0] |= BIT_2;
2907                 icb->response_accumulation_timer = 3;
2908                 icb->interrupt_delay_timer = 5;
2909
2910                 vha->flags.process_response_queue = 1;
2911         } else {
2912                 /* Enable ZIO. */
2913                 if (!vha->flags.init_done) {
2914                         ha->zio_mode = icb->add_firmware_options[0] &
2915                             (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2916                         ha->zio_timer = icb->interrupt_delay_timer ?
2917                             icb->interrupt_delay_timer : 2;
2918                 }
2919                 icb->add_firmware_options[0] &=
2920                     ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2921                 vha->flags.process_response_queue = 0;
2922                 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2923                         ha->zio_mode = QLA_ZIO_MODE_6;
2924
2925                         ql_log(ql_log_info, vha, 0x0068,
2926                             "ZIO mode %d enabled; timer delay (%d us).\n",
2927                             ha->zio_mode, ha->zio_timer * 100);
2928
2929                         icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2930                         icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2931                         vha->flags.process_response_queue = 1;
2932                 }
2933         }
2934
2935         if (rval) {
2936                 ql_log(ql_log_warn, vha, 0x0069,
2937                     "NVRAM configuration failed.\n");
2938         }
2939         return (rval);
2940 }
2941
2942 static void
2943 qla2x00_rport_del(void *data)
2944 {
2945         fc_port_t *fcport = data;
2946         struct fc_rport *rport;
2947         unsigned long flags;
2948
2949         spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2950         rport = fcport->drport ? fcport->drport : fcport->rport;
2951         fcport->drport = NULL;
2952         spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2953         if (rport)
2954                 fc_remote_port_delete(rport);
2955 }
2956
2957 /**
2958  * qla2x00_alloc_fcport() - Allocate a generic fcport.
2959  * @vha: HA context
2960  * @flags: allocation flags
2961  *
2962  * Returns a pointer to the allocated fcport, or NULL, if none available.
2963  */
2964 fc_port_t *
2965 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2966 {
2967         fc_port_t *fcport;
2968
2969         fcport = kzalloc(sizeof(fc_port_t), flags);
2970         if (!fcport)
2971                 return NULL;
2972
2973         /* Setup fcport template structure. */
2974         fcport->vha = vha;
2975         fcport->port_type = FCT_UNKNOWN;
2976         fcport->loop_id = FC_NO_LOOP_ID;
2977         qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2978         fcport->supported_classes = FC_COS_UNSPECIFIED;
2979
2980         return fcport;
2981 }
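/*
 * Illustrative sketch only, kept out of the build with #if 0: the caller
 * pattern for qla2x00_alloc_fcport() seen later in this file -- allocate a
 * template fcport, fill in the discovered identity, and either add it to
 * vha->vp_fcports or kfree() it on the error path.  The helper name below
 * is hypothetical.
 */
#if 0
static int example_add_discovered_port(scsi_qla_host_t *vha, port_id_t d_id,
    uint16_t loop_id)
{
        fc_port_t *fcport;

        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport)
                return QLA_MEMORY_ALLOC_FAILED;

        /* Identity as reported by the firmware/name server. */
        fcport->d_id.b24 = d_id.b24;
        fcport->loop_id = loop_id;
        fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

        list_add_tail(&fcport->list, &vha->vp_fcports);
        return QLA_SUCCESS;
}
#endif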
2982
2983 /*
2984  * qla2x00_configure_loop
2985  *      Updates Fibre Channel Device Database with what is actually on loop.
2986  *
2987  * Input:
2988  *      ha                = adapter block pointer.
2989  *
2990  * Returns:
2991  *      0 = success.
2992  *      1 = error.
2993  *      2 = database was full and device was not configured.
2994  */
2995 static int
2996 qla2x00_configure_loop(scsi_qla_host_t *vha)
2997 {
2998         int  rval = QLA_SUCCESS;
2999         unsigned long flags, save_flags;
3000         struct qla_hw_data *ha = vha->hw;
3001
3002
3003         /* Get Initiator ID */
3004         if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
3005                 rval = qla2x00_configure_hba(vha);
3006                 if (rval != QLA_SUCCESS) {
3007                         ql_dbg(ql_dbg_disc, vha, 0x2013,
3008                             "Unable to configure HBA.\n");
3009                         return (rval);
3010                 }
3011         }
3012
3013         save_flags = flags = vha->dpc_flags;
3014         ql_dbg(ql_dbg_disc, vha, 0x2014,
3015             "Configure loop -- dpc flags = 0x%lx.\n", flags);
3016
3017         /*
3018          * If we have both an RSCN and PORT UPDATE pending then handle them
3019          * both at the same time.
3020          */
3021         clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3022         clear_bit(RSCN_UPDATE, &vha->dpc_flags);
3023
3024         qla2x00_get_data_rate(vha);
3025
3026         /* Determine what we need to do */
3027         if (ha->current_topology == ISP_CFG_FL &&
3028             (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3029
3030                 set_bit(RSCN_UPDATE, &flags);
3031
3032         } else if (ha->current_topology == ISP_CFG_F &&
3033             (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3034
3035                 set_bit(RSCN_UPDATE, &flags);
3036                 clear_bit(LOCAL_LOOP_UPDATE, &flags);
3037
3038         } else if (ha->current_topology == ISP_CFG_N) {
3039                 clear_bit(RSCN_UPDATE, &flags);
3040
3041         } else if (!vha->flags.online ||
3042             (test_bit(ABORT_ISP_ACTIVE, &flags))) {
3043
3044                 set_bit(RSCN_UPDATE, &flags);
3045                 set_bit(LOCAL_LOOP_UPDATE, &flags);
3046         }
3047
3048         if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
3049                 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
3050                         ql_dbg(ql_dbg_disc, vha, 0x2015,
3051                             "Loop resync needed, failing.\n");
3052                         rval = QLA_FUNCTION_FAILED;
3053                 } else
3054                         rval = qla2x00_configure_local_loop(vha);
3055         }
3056
3057         if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
3058                 if (LOOP_TRANSITION(vha)) {
3059                         ql_dbg(ql_dbg_disc, vha, 0x201e,
3060                             "Needs RSCN update and loop transition.\n");
3061                         rval = QLA_FUNCTION_FAILED;
3062                 }
3063                 else
3064                         rval = qla2x00_configure_fabric(vha);
3065         }
3066
3067         if (rval == QLA_SUCCESS) {
3068                 if (atomic_read(&vha->loop_down_timer) ||
3069                     test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
3070                         rval = QLA_FUNCTION_FAILED;
3071                 } else {
3072                         atomic_set(&vha->loop_state, LOOP_READY);
3073                         ql_dbg(ql_dbg_disc, vha, 0x2069,
3074                             "LOOP READY.\n");
3075
3076                         /*
3077                          * Process any ATIO queue entries that came in
3078                          * while we weren't online.
3079                          */
3080                         if (qla_tgt_mode_enabled(vha)) {
3081                                 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
3082                                         spin_lock_irqsave(&ha->tgt.atio_lock,
3083                                             flags);
3084                                         qlt_24xx_process_atio_queue(vha, 0);
3085                                         spin_unlock_irqrestore(
3086                                             &ha->tgt.atio_lock, flags);
3087                                 } else {
3088                                         spin_lock_irqsave(&ha->hardware_lock,
3089                                             flags);
3090                                         qlt_24xx_process_atio_queue(vha, 1);
3091                                         spin_unlock_irqrestore(
3092                                             &ha->hardware_lock, flags);
3093                                 }
3094                         }
3095                 }
3096         }
3097
3098         if (rval) {
3099                 ql_dbg(ql_dbg_disc, vha, 0x206a,
3100                     "%s *** FAILED ***.\n", __func__);
3101         } else {
3102                 ql_dbg(ql_dbg_disc, vha, 0x206b,
3103                     "%s: exiting normally.\n", __func__);
3104         }
3105
3106         /* Restore state if a resync event occurred during processing */
3107         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
3108                 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
3109                         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3110                 if (test_bit(RSCN_UPDATE, &save_flags)) {
3111                         set_bit(RSCN_UPDATE, &vha->dpc_flags);
3112                 }
3113         }
3114
3115         return (rval);
3116 }
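/*
 * Illustrative sketch only, kept out of the build with #if 0: how a caller
 * typically requests a rescan -- set the relevant dpc flag(s) and let
 * qla2x00_configure_loop() consume them (and restore them if a resync was
 * signalled while it ran).  The helper name is hypothetical.
 */
#if 0
static void example_request_loop_rescan(scsi_qla_host_t *vha)
{
        /* Request both a local loop walk and a fabric (RSCN) scan. */
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);

        qla2x00_configure_loop(vha);
}
#endif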
3117
3118
3119
3120 /*
3121  * qla2x00_configure_local_loop
3122  *      Updates Fibre Channel Device Database with local loop devices.
3123  *
3124  * Input:
3125  *      ha = adapter block pointer.
3126  *
3127  * Returns:
3128  *      0 = success.
3129  */
3130 static int
3131 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
3132 {
3133         int             rval, rval2;
3134         int             found_devs;
3135         int             found;
3136         fc_port_t       *fcport, *new_fcport;
3137
3138         uint16_t        index;
3139         uint16_t        entries;
3140         char            *id_iter;
3141         uint16_t        loop_id;
3142         uint8_t         domain, area, al_pa;
3143         struct qla_hw_data *ha = vha->hw;
3144
3145         found_devs = 0;
3146         new_fcport = NULL;
3147         entries = MAX_FIBRE_DEVICES_LOOP;
3148
3149         /* Get list of logged in devices. */
3150         memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
3151         rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
3152             &entries);
3153         if (rval != QLA_SUCCESS)
3154                 goto cleanup_allocation;
3155
3156         ql_dbg(ql_dbg_disc, vha, 0x2017,
3157             "Entries in ID list (%d).\n", entries);
3158         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
3159             (uint8_t *)ha->gid_list,
3160             entries * sizeof(struct gid_list_info));
3161
3162         /* Allocate temporary fcport for any new fcports discovered. */
3163         new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3164         if (new_fcport == NULL) {
3165                 ql_log(ql_log_warn, vha, 0x2018,
3166                     "Memory allocation failed for fcport.\n");
3167                 rval = QLA_MEMORY_ALLOC_FAILED;
3168                 goto cleanup_allocation;
3169         }
3170         new_fcport->flags &= ~FCF_FABRIC_DEVICE;
3171
3172         /*
3173          * Mark local devices that were present with FCS_DEVICE_LOST for now.
3174          */
3175         list_for_each_entry(fcport, &vha->vp_fcports, list) {
3176                 if (atomic_read(&fcport->state) == FCS_ONLINE &&
3177                     fcport->port_type != FCT_BROADCAST &&
3178                     (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3179
3180                         ql_dbg(ql_dbg_disc, vha, 0x2019,
3181                             "Marking port lost loop_id=0x%04x.\n",
3182                             fcport->loop_id);
3183
3184                         qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3185                 }
3186         }
3187
3188         /* Add devices to port list. */
3189         id_iter = (char *)ha->gid_list;
3190         for (index = 0; index < entries; index++) {
3191                 domain = ((struct gid_list_info *)id_iter)->domain;
3192                 area = ((struct gid_list_info *)id_iter)->area;
3193                 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
3194                 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3195                         loop_id = (uint16_t)
3196                             ((struct gid_list_info *)id_iter)->loop_id_2100;
3197                 else
3198                         loop_id = le16_to_cpu(
3199                             ((struct gid_list_info *)id_iter)->loop_id);
3200                 id_iter += ha->gid_list_info_size;
3201
3202                 /* Bypass reserved domain fields. */
3203                 if ((domain & 0xf0) == 0xf0)
3204                         continue;
3205
3206                 /* Bypass if not same domain and area of adapter. */
3207                 if (area && domain &&
3208                     (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
3209                         continue;
3210
3211                 /* Bypass invalid local loop ID. */
3212                 if (loop_id > LAST_LOCAL_LOOP_ID)
3213                         continue;
3214
3215                 memset(new_fcport, 0, sizeof(fc_port_t));
3216
3217                 /* Fill in member data. */
3218                 new_fcport->d_id.b.domain = domain;
3219                 new_fcport->d_id.b.area = area;
3220                 new_fcport->d_id.b.al_pa = al_pa;
3221                 new_fcport->loop_id = loop_id;
3222                 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
3223                 if (rval2 != QLA_SUCCESS) {
3224                         ql_dbg(ql_dbg_disc, vha, 0x201a,
3225                             "Failed to retrieve fcport information "
3226                             "-- get_port_database=%x, loop_id=0x%04x.\n",
3227                             rval2, new_fcport->loop_id);
3228                         ql_dbg(ql_dbg_disc, vha, 0x201b,
3229                             "Scheduling resync.\n");
3230                         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3231                         continue;
3232                 }
3233
3234                 /* Check for matching device in port list. */
3235                 found = 0;
3236                 fcport = NULL;
3237                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3238                         if (memcmp(new_fcport->port_name, fcport->port_name,
3239                             WWN_SIZE))
3240                                 continue;
3241
3242                         fcport->flags &= ~FCF_FABRIC_DEVICE;
3243                         fcport->loop_id = new_fcport->loop_id;
3244                         fcport->port_type = new_fcport->port_type;
3245                         fcport->d_id.b24 = new_fcport->d_id.b24;
3246                         memcpy(fcport->node_name, new_fcport->node_name,
3247                             WWN_SIZE);
3248
3249                         found++;
3250                         break;
3251                 }
3252
3253                 if (!found) {
3254                         /* New device, add to fcports list. */
3255                         list_add_tail(&new_fcport->list, &vha->vp_fcports);
3256
3257                         /* Allocate a new replacement fcport. */
3258                         fcport = new_fcport;
3259                         new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3260                         if (new_fcport == NULL) {
3261                                 ql_log(ql_log_warn, vha, 0x201c,
3262                                     "Failed to allocate memory for fcport.\n");
3263                                 rval = QLA_MEMORY_ALLOC_FAILED;
3264                                 goto cleanup_allocation;
3265                         }
3266                         new_fcport->flags &= ~FCF_FABRIC_DEVICE;
3267                 }
3268
3269                 /* Base iIDMA settings on HBA port speed. */
3270                 fcport->fp_speed = ha->link_data_rate;
3271
3272                 qla2x00_update_fcport(vha, fcport);
3273
3274                 found_devs++;
3275         }
3276
3277 cleanup_allocation:
3278         kfree(new_fcport);
3279
3280         if (rval != QLA_SUCCESS) {
3281                 ql_dbg(ql_dbg_disc, vha, 0x201d,
3282                     "Configure local loop error exit: rval=%x.\n", rval);
3283         }
3284
3285         return (rval);
3286 }
3287
3288 static void
3289 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3290 {
3291         int rval;
3292         uint16_t mb[MAILBOX_REGISTER_COUNT];
3293         struct qla_hw_data *ha = vha->hw;
3294
3295         if (!IS_IIDMA_CAPABLE(ha))
3296                 return;
3297
3298         if (atomic_read(&fcport->state) != FCS_ONLINE)
3299                 return;
3300
3301         if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
3302             fcport->fp_speed > ha->link_data_rate)
3303                 return;
3304
3305         rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
3306             mb);
3307         if (rval != QLA_SUCCESS) {
3308                 ql_dbg(ql_dbg_disc, vha, 0x2004,
3309                     "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
3310                     fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
3311         } else {
3312                 ql_dbg(ql_dbg_disc, vha, 0x2005,
3313                     "iIDMA adjusted to %s Gb/s on %8phN.\n",
3314                     qla2x00_get_link_speed_str(ha, fcport->fp_speed),
3315                     fcport->port_name);
3316         }
3317 }
3318
3319 static void
3320 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3321 {
3322         struct fc_rport_identifiers rport_ids;
3323         struct fc_rport *rport;
3324         unsigned long flags;
3325
3326         rport_ids.node_name = wwn_to_u64(fcport->node_name);
3327         rport_ids.port_name = wwn_to_u64(fcport->port_name);
3328         rport_ids.port_id = fcport->d_id.b.domain << 16 |
3329             fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
3330         rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3331         fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
3332         if (!rport) {
3333                 ql_log(ql_log_warn, vha, 0x2006,
3334                     "Unable to allocate fc remote port.\n");
3335                 return;
3336         }
3337         /*
3338          * Create target mode FC NEXUS in qla_target.c if target mode is
3339          * enabled.
3340          */
3341
3342         qlt_fc_port_added(vha, fcport);
3343
3344         spin_lock_irqsave(fcport->vha->host->host_lock, flags);
3345         *((fc_port_t **)rport->dd_data) = fcport;
3346         spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
3347
3348         rport->supported_classes = fcport->supported_classes;
3349
3350         rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3351         if (fcport->port_type == FCT_INITIATOR)
3352                 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3353         if (fcport->port_type == FCT_TARGET)
3354                 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
3355         fc_remote_port_rolechg(rport, rport_ids.roles);
3356 }
3357
3358 /*
3359  * qla2x00_update_fcport
3360  *      Updates device on list.
3361  *
3362  * Input:
3363  *      ha = adapter block pointer.
3364  *      fcport = port structure pointer.
3365  *
3366  * Return:
3367  *      0  - Success
3368  *  BIT_0 - error
3369  *
3370  * Context:
3371  *      Kernel context.
3372  */
3373 void
3374 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3375 {
3376         fcport->vha = vha;
3377
3378         if (IS_QLAFX00(vha->hw)) {
3379                 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3380                 goto reg_port;
3381         }
3382         fcport->login_retry = 0;
3383         fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
3384
3385         qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3386         qla2x00_iidma_fcport(vha, fcport);
3387         qla24xx_update_fcport_fcp_prio(vha, fcport);
3388
3389 reg_port:
3390         if (qla_ini_mode_enabled(vha))
3391                 qla2x00_reg_remote_port(vha, fcport);
3392         else {
3393                 /*
3394                  * Create target mode FC NEXUS in qla_target.c
3395                  */
3396                 qlt_fc_port_added(vha, fcport);
3397         }
3398 }
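/*
 * Illustrative sketch only, kept out of the build with #if 0: the
 * per-device sequence qla2x00_configure_local_loop() above performs before
 * publishing a port -- refresh the firmware's port database entry, then
 * hand the port to qla2x00_update_fcport().  The helper name is
 * hypothetical.
 */
#if 0
static void example_publish_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
        if (qla2x00_get_port_database(vha, fcport, 0) != QLA_SUCCESS) {
                /* Stale data -- ask the DPC thread to resync the loop. */
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                return;
        }

        /* Seed iIDMA from the current HBA link rate, as above. */
        fcport->fp_speed = vha->hw->link_data_rate;
        qla2x00_update_fcport(vha, fcport);
}
#endif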
3399
3400 /*
3401  * qla2x00_configure_fabric
3402  *      Set up SNS devices with loop IDs.
3403  *
3404  * Input:
3405  *      ha = adapter block pointer.
3406  *
3407  * Returns:
3408  *      0 = success.
3409  *      BIT_0 = error
3410  */
3411 static int
3412 qla2x00_configure_fabric(scsi_qla_host_t *vha)
3413 {
3414         int     rval;
3415         fc_port_t       *fcport, *fcptemp;
3416         uint16_t        next_loopid;
3417         uint16_t        mb[MAILBOX_REGISTER_COUNT];
3418         uint16_t        loop_id;
3419         LIST_HEAD(new_fcports);
3420         struct qla_hw_data *ha = vha->hw;
3421         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3422         int             discovery_gen;
3423
3424         /* If FL port exists, then SNS is present */
3425         if (IS_FWI2_CAPABLE(ha))
3426                 loop_id = NPH_F_PORT;
3427         else
3428                 loop_id = SNS_FL_PORT;
3429         rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
3430         if (rval != QLA_SUCCESS) {
3431                 ql_dbg(ql_dbg_disc, vha, 0x201f,
3432                     "MBX_GET_PORT_NAME failed, No FL Port.\n");
3433
3434                 vha->device_flags &= ~SWITCH_FOUND;
3435                 return (QLA_SUCCESS);
3436         }
3437         vha->device_flags |= SWITCH_FOUND;
3438
3439         do {
3440                 /* FDMI support. */
3441                 if (ql2xfdmienable &&
3442                     test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
3443                         qla2x00_fdmi_register(vha);
3444
3445                 /* Ensure we are logged into the SNS. */
3446                 if (IS_FWI2_CAPABLE(ha))
3447                         loop_id = NPH_SNS;
3448                 else
3449                         loop_id = SIMPLE_NAME_SERVER;
3450                 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3451                     0xfc, mb, BIT_1|BIT_0);
3452                 if (rval != QLA_SUCCESS) {
3453                         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3454                         return rval;
3455                 }
3456                 if (mb[0] != MBS_COMMAND_COMPLETE) {
3457                         ql_dbg(ql_dbg_disc, vha, 0x2042,
3458                             "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3459                             "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3460                             mb[2], mb[6], mb[7]);
3461                         return (QLA_SUCCESS);
3462                 }
3463
3464                 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3465                         if (qla2x00_rft_id(vha)) {
3466                                 /* EMPTY */
3467                                 ql_dbg(ql_dbg_disc, vha, 0x2045,
3468                                     "Register FC-4 TYPE failed.\n");
3469                         }
3470                         if (qla2x00_rff_id(vha)) {
3471                                 /* EMPTY */
3472                                 ql_dbg(ql_dbg_disc, vha, 0x2049,
3473                                     "Register FC-4 Features failed.\n");
3474                         }
3475                         if (qla2x00_rnn_id(vha)) {
3476                                 /* EMPTY */
3477                                 ql_dbg(ql_dbg_disc, vha, 0x204f,
3478                                     "Register Node Name failed.\n");
3479                         } else if (qla2x00_rsnn_nn(vha)) {
3480                                 /* EMPTY */
3481                                 ql_dbg(ql_dbg_disc, vha, 0x2053,
3482                                     "Register Symbolic Node Name failed.\n");
3483                         }
3484                 }
3485
3486 #define QLA_FCPORT_SCAN         1
3487 #define QLA_FCPORT_FOUND        2
3488
3489                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3490                         fcport->scan_state = QLA_FCPORT_SCAN;
3491                 }
3492
3493                 /* Mark the time right before querying FW for connected ports.
3494                  * This process is long and asynchronous, and by the time it is
3495                  * done the collected information might not be accurate anymore.
3496                  * E.g. a disconnected port might have re-connected and a brand
3497                  * new session might have been created. In that case the
3498                  * session's generation will be newer than discovery_gen. */
3499                 qlt_do_generation_tick(vha, &discovery_gen);
3500
3501                 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3502                 if (rval != QLA_SUCCESS)
3503                         break;
3504
3505                 /*
3506                  * Logout all previous fabric devices marked lost, except
3507                  * FCP2 devices.
3508                  */
3509                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3510                         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3511                                 break;
3512
3513                         if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3514                                 continue;
3515
3516                         if (fcport->scan_state == QLA_FCPORT_SCAN) {
3517                                 if (qla_ini_mode_enabled(base_vha) &&
3518                                     atomic_read(&fcport->state) == FCS_ONLINE) {
3519                                         qla2x00_mark_device_lost(vha, fcport,
3520                                             ql2xplogiabsentdevice, 0);
3521                                         if (fcport->loop_id != FC_NO_LOOP_ID &&
3522                                             (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3523                                             fcport->port_type != FCT_INITIATOR &&
3524                                             fcport->port_type != FCT_BROADCAST) {
3525                                                 ha->isp_ops->fabric_logout(vha,
3526                                                     fcport->loop_id,
3527                                                     fcport->d_id.b.domain,
3528                                                     fcport->d_id.b.area,
3529                                                     fcport->d_id.b.al_pa);
3530                                                 qla2x00_clear_loop_id(fcport);
3531                                         }
3532                                 } else if (!qla_ini_mode_enabled(base_vha)) {
3533                                         /*
3534                                          * In target mode, explicitly kill
3535                                          * sessions and log out of devices
3536                                          * that are gone, so that we don't
3537                                          * end up with an initiator using the
3538                                          * wrong ACL (if the fabric recycles
3539                                          * an FC address and we have a stale
3540                                          * session around) and so that we don't
3541                                          * report initiators that are no longer
3542                                          * on the fabric.
3543                                          */
3544                                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3545                                             "port gone, logging out/killing session: "
3546                                             "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3547                                             "scan_state %d\n",
3548                                             fcport->port_name,
3549                                             atomic_read(&fcport->state),
3550                                             fcport->flags, fcport->fc4_type,
3551                                             fcport->scan_state);
3552                                         qlt_fc_port_deleted(vha, fcport,
3553                                             discovery_gen);
3554                                 }
3555                         }
3556                 }
3557
3558                 /* Starting free loop ID. */
3559                 next_loopid = ha->min_external_loopid;
3560
3561                 /*
3562          * Scan through our port list and log in entries that need to be
3563                  * logged in.
3564                  */
3565                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3566                         if (atomic_read(&vha->loop_down_timer) ||
3567                             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3568                                 break;
3569
3570                         if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3571                             (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3572                                 continue;
3573
3574                         /*
3575                          * If we're not an initiator, skip looking for devices
3576                          * and logging in.  There's no reason for us to do it,
3577                          * and it seems to actively cause problems in target
3578                          * mode if we race with the initiator logging into us
3579                          * (we might get the "port ID used" status back from
3580                          * our login command and log out the initiator, which
3581                          * seems to cause havoc).
3582                          */
3583                         if (!qla_ini_mode_enabled(base_vha)) {
3584                                 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3585                                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3586                                             "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3587                                             "scan_state %d (initiator mode disabled; skipping "
3588                                             "login)\n", fcport->port_name,
3589                                             atomic_read(&fcport->state),
3590                                             fcport->flags, fcport->fc4_type,
3591                                             fcport->scan_state);
3592                                 }
3593                                 continue;
3594                         }
3595
3596                         if (fcport->loop_id == FC_NO_LOOP_ID) {
3597                                 fcport->loop_id = next_loopid;
3598                                 rval = qla2x00_find_new_loop_id(
3599                                     base_vha, fcport);
3600                                 if (rval != QLA_SUCCESS) {
3601                                         /* Ran out of IDs to use */
3602                                         break;
3603                                 }
3604                         }
3605                         /* Login and update database */
3606                         qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3607                 }
3608
3609                 /* Exit if out of loop IDs. */
3610                 if (rval != QLA_SUCCESS) {
3611                         break;
3612                 }
3613
3614                 /*
3615                  * Login and add the new devices to our port list.
3616                  */
3617                 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3618                         if (atomic_read(&vha->loop_down_timer) ||
3619                             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3620                                 break;
3621
3622                         /*
3623                          * If we're not an initiator, skip looking for devices
3624                          * and logging in.  There's no reason for us to do it,
3625                          * and it seems to actively cause problems in target
3626                          * mode if we race with the initiator logging into us
3627                          * (we might get the "port ID used" status back from
3628                          * our login command and log out the initiator, which
3629                          * seems to cause havoc).
3630                          */
3631                         if (qla_ini_mode_enabled(base_vha)) {
3632                                 /* Find a new loop ID to use. */
3633                                 fcport->loop_id = next_loopid;
3634                                 rval = qla2x00_find_new_loop_id(base_vha,
3635                                     fcport);
3636                                 if (rval != QLA_SUCCESS) {
3637                                         /* Ran out of IDs to use */
3638                                         break;
3639                                 }
3640
3641                                 /* Login and update database */
3642                                 qla2x00_fabric_dev_login(vha, fcport,
3643                                     &next_loopid);
3644                         } else {
3645                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3646                                         "new port %8phC state 0x%x flags 0x%x fc4_type "
3647                                         "0x%x scan_state %d (initiator mode disabled; "
3648                                         "skipping login)\n",
3649                                         fcport->port_name,
3650                                         atomic_read(&fcport->state),
3651                                         fcport->flags, fcport->fc4_type,
3652                                         fcport->scan_state);
3653                         }
3654
3655                         list_move_tail(&fcport->list, &vha->vp_fcports);
3656                 }
3657         } while (0);
3658
3659         /* Free all new device structures not processed. */
3660         list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3661                 list_del(&fcport->list);
3662                 kfree(fcport);
3663         }
3664
3665         if (rval) {
3666                 ql_dbg(ql_dbg_disc, vha, 0x2068,
3667                     "Configure fabric error exit rval=%d.\n", rval);
3668         }
3669
3670         return (rval);
3671 }
3672
3673 /*
3674  * qla2x00_find_all_fabric_devs
3675  *
3676  * Input:
3677  *      ha = adapter block pointer.
3678  *      new_fcports = list on which to return newly discovered fabric devices.
3679  *
3680  * Returns:
3681  *      0 = success.
3682  *
3683  * Context:
3684  *      Kernel context.
3685  */
3686 static int
3687 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3688         struct list_head *new_fcports)
3689 {
3690         int             rval;
3691         uint16_t        loop_id;
3692         fc_port_t       *fcport, *new_fcport, *fcptemp;
3693         int             found;
3694
3695         sw_info_t       *swl;
3696         int             swl_idx;
3697         int             first_dev, last_dev;
3698         port_id_t       wrap = {}, nxt_d_id;
3699         struct qla_hw_data *ha = vha->hw;
3700         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3701
3702         rval = QLA_SUCCESS;
3703
3704         /* Try GID_PT to get the device list, else fall back to GA_NXT. */
3705         if (!ha->swl)
3706                 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
3707                     GFP_KERNEL);
3708         swl = ha->swl;
3709         if (!swl) {
3710                 /*EMPTY*/
3711                 ql_dbg(ql_dbg_disc, vha, 0x2054,
3712                     "GID_PT allocations failed, fallback on GA_NXT.\n");
3713         } else {
3714                 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
3715                 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3716                         swl = NULL;
3717                 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3718                         swl = NULL;
3719                 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3720                         swl = NULL;
3721                 } else if (ql2xiidmaenable &&
3722                     qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3723                         qla2x00_gpsc(vha, swl);
3724                 }
3725
3726                 /* If other queries succeeded probe for FC-4 type */
3727                 if (swl)
3728                         qla2x00_gff_id(vha, swl);
3729         }
3730         swl_idx = 0;
3731
3732         /* Allocate temporary fcport for any new fcports discovered. */
3733         new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3734         if (new_fcport == NULL) {
3735                 ql_log(ql_log_warn, vha, 0x205e,
3736                     "Failed to allocate memory for fcport.\n");
3737                 return (QLA_MEMORY_ALLOC_FAILED);
3738         }
3739         new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3740         /* Set start port ID scan at adapter ID. */
3741         first_dev = 1;
3742         last_dev = 0;
3743
3744         /* Starting free loop ID. */
3745         loop_id = ha->min_external_loopid;
3746         for (; loop_id <= ha->max_loop_id; loop_id++) {
3747                 if (qla2x00_is_reserved_id(vha, loop_id))
3748                         continue;
3749
3750                 if (ha->current_topology == ISP_CFG_FL &&
3751                     (atomic_read(&vha->loop_down_timer) ||
3752                      LOOP_TRANSITION(vha))) {
3753                         atomic_set(&vha->loop_down_timer, 0);
3754                         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3755                         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3756                         break;
3757                 }
3758
3759                 if (swl != NULL) {
3760                         if (last_dev) {
3761                                 wrap.b24 = new_fcport->d_id.b24;
3762                         } else {
3763                                 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3764                                 memcpy(new_fcport->node_name,
3765                                     swl[swl_idx].node_name, WWN_SIZE);
3766                                 memcpy(new_fcport->port_name,
3767                                     swl[swl_idx].port_name, WWN_SIZE);
3768                                 memcpy(new_fcport->fabric_port_name,
3769                                     swl[swl_idx].fabric_port_name, WWN_SIZE);
3770                                 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3771                                 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3772
3773                                 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3774                                         last_dev = 1;
3775                                 }
3776                                 swl_idx++;
3777                         }
3778                 } else {
3779                         /* Send GA_NXT to the switch */
3780                         rval = qla2x00_ga_nxt(vha, new_fcport);
3781                         if (rval != QLA_SUCCESS) {
3782                                 ql_log(ql_log_warn, vha, 0x2064,
3783                                     "SNS scan failed -- assuming "
3784                                     "zero-entry result.\n");
3785                                 list_for_each_entry_safe(fcport, fcptemp,
3786                                     new_fcports, list) {
3787                                         list_del(&fcport->list);
3788                                         kfree(fcport);
3789                                 }
3790                                 rval = QLA_SUCCESS;
3791                                 break;
3792                         }
3793                 }
3794
3795                 /* If wrap on switch device list, exit. */
3796                 if (first_dev) {
3797                         wrap.b24 = new_fcport->d_id.b24;
3798                         first_dev = 0;
3799                 } else if (new_fcport->d_id.b24 == wrap.b24) {
3800                         ql_dbg(ql_dbg_disc, vha, 0x2065,
3801                             "Device wrap (%02x%02x%02x).\n",
3802                             new_fcport->d_id.b.domain,
3803                             new_fcport->d_id.b.area,
3804                             new_fcport->d_id.b.al_pa);
3805                         break;
3806                 }
3807
3808                 /* Bypass if same physical adapter. */
3809                 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3810                         continue;
3811
3812                 /* Bypass virtual ports of the same host. */
3813                 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
3814                         continue;
3815
3816                 /* Bypass if same domain and area of adapter. */
3817                 if (((new_fcport->d_id.b24 & 0xffff00) ==
3818                     (vha->d_id.b24 & 0xffff00)) &&
3819                     ha->current_topology == ISP_CFG_FL)
3820                         continue;
3821
3822                 /* Bypass reserved domain fields. */
3823                 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3824                         continue;
3825
3826                 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3827                 if (ql2xgffidenable &&
3828                     (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3829                     new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3830                         continue;
3831
3832                 /* Locate matching device in database. */
3833                 found = 0;
3834                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3835                         if (memcmp(new_fcport->port_name, fcport->port_name,
3836                             WWN_SIZE))
3837                                 continue;
3838
3839                         fcport->scan_state = QLA_FCPORT_FOUND;
3840
3841                         found++;
3842
3843                         /* Update port state. */
3844                         memcpy(fcport->fabric_port_name,
3845                             new_fcport->fabric_port_name, WWN_SIZE);
3846                         fcport->fp_speed = new_fcport->fp_speed;
3847
3848                         /*
3849                          * If the address is the same and the state is FCS_ONLINE
3850                          * (or we are in target mode), nothing has changed.
3851                          */
3852                         if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3853                             (atomic_read(&fcport->state) == FCS_ONLINE ||
3854                              !qla_ini_mode_enabled(base_vha))) {
3855                                 break;
3856                         }
3857
3858                         /*
3859                          * If the device was not a fabric device before, mark it as one now.
3860                          */
3861                         if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3862                                 fcport->d_id.b24 = new_fcport->d_id.b24;
3863                                 qla2x00_clear_loop_id(fcport);
3864                                 fcport->flags |= (FCF_FABRIC_DEVICE |
3865                                     FCF_LOGIN_NEEDED);
3866                                 break;
3867                         }
3868
3869                         /*
3870                          * Port ID changed or device was marked to be updated;
3871                          * Log it out if still logged in and mark it for
3872                          * relogin later.
3873                          */
3874                         if (!qla_ini_mode_enabled(base_vha)) {
3875                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3876                                          "port changed FC ID, %8phC"
3877                                          " old %x:%x:%x (loop_id 0x%04x) -> new %x:%x:%x\n",
3878                                          fcport->port_name,
3879                                          fcport->d_id.b.domain,
3880                                          fcport->d_id.b.area,
3881                                          fcport->d_id.b.al_pa,
3882                                          fcport->loop_id,
3883                                          new_fcport->d_id.b.domain,
3884                                          new_fcport->d_id.b.area,
3885                                          new_fcport->d_id.b.al_pa);
3886                                 fcport->d_id.b24 = new_fcport->d_id.b24;
3887                                 break;
3888                         }
3889
3890                         fcport->d_id.b24 = new_fcport->d_id.b24;
3891                         fcport->flags |= FCF_LOGIN_NEEDED;
3892                         if (fcport->loop_id != FC_NO_LOOP_ID &&
3893                             (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3894                             (fcport->flags & FCF_ASYNC_SENT) == 0 &&
3895                             fcport->port_type != FCT_INITIATOR &&
3896                             fcport->port_type != FCT_BROADCAST) {
3897                                 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3898                                     fcport->d_id.b.domain, fcport->d_id.b.area,
3899                                     fcport->d_id.b.al_pa);
3900                                 qla2x00_clear_loop_id(fcport);
3901                         }
3902
3903                         break;
3904                 }
3905
3906                 if (found)
3907                         continue;
3908                 /* If device was not in our fcports list, then add it. */
3909                 new_fcport->scan_state = QLA_FCPORT_FOUND;
3910                 list_add_tail(&new_fcport->list, new_fcports);
3911
3912                 /* Allocate a new replacement fcport. */
3913                 nxt_d_id.b24 = new_fcport->d_id.b24;
3914                 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3915                 if (new_fcport == NULL) {
3916                         ql_log(ql_log_warn, vha, 0x2066,
3917                             "Memory allocation failed for fcport.\n");
3918                         return (QLA_MEMORY_ALLOC_FAILED);
3919                 }
3920                 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3921                 new_fcport->d_id.b24 = nxt_d_id.b24;
3922         }
3923
3924         kfree(new_fcport);
3925
3926         return (rval);
3927 }
3928
3929 /*
3930  * qla2x00_find_new_loop_id
3931  *      Scan through our port list and find a new usable loop ID.
3932  *      Scan the loop ID map and find a new usable loop ID.
3933  * Input:
3934  *      ha:     adapter state pointer.
3935  *      dev:    port structure pointer.
3936  *
3937  * Returns:
3938  *      qla2x00 local function return status code.
3939  *
3940  * Context:
3941  *      Kernel context.
3942  */
3943 int
3944 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3945 {
3946         int     rval;
3947         struct qla_hw_data *ha = vha->hw;
3948         unsigned long flags = 0;
3949
3950         rval = QLA_SUCCESS;
3951
3952         spin_lock_irqsave(&ha->vport_slock, flags);
3953
3954         dev->loop_id = find_first_zero_bit(ha->loop_id_map,
3955             LOOPID_MAP_SIZE);
3956         if (dev->loop_id >= LOOPID_MAP_SIZE ||
3957             qla2x00_is_reserved_id(vha, dev->loop_id)) {
3958                 dev->loop_id = FC_NO_LOOP_ID;
3959                 rval = QLA_FUNCTION_FAILED;
3960         } else
3961                 set_bit(dev->loop_id, ha->loop_id_map);
3962
3963         spin_unlock_irqrestore(&ha->vport_slock, flags);
3964
3965         if (rval == QLA_SUCCESS)
3966                 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3967                     "Assigning new loopid=%x, portid=%x.\n",
3968                     dev->loop_id, dev->d_id.b24);
3969         else
3970                 ql_log(ql_log_warn, dev->vha, 0x2087,
3971                     "No loop_id's available, portid=%x.\n",
3972                     dev->d_id.b24);
3973
3974         return (rval);
3975 }
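/*
 * Illustrative sketch only, kept out of the build with #if 0: loop ID
 * allocation paired with its release.  qla2x00_find_new_loop_id() claims a
 * bit in ha->loop_id_map; qla2x00_clear_loop_id() is the matching release
 * used throughout this file when a port is abandoned.  The helper name is
 * hypothetical.
 */
#if 0
static void example_claim_and_release_loop_id(scsi_qla_host_t *vha,
    fc_port_t *fcport)
{
        if (qla2x00_find_new_loop_id(vha, fcport) != QLA_SUCCESS)
                return; /* map exhausted: fcport->loop_id == FC_NO_LOOP_ID */

        /* ... attempt a login here; if it never succeeds, give the ID back. */
        qla2x00_clear_loop_id(fcport);
}
#endif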
3976
3977 /*
3978  * qla2x00_fabric_dev_login
3979  *      Log in a fabric target device and update the FC port database.
3980  *
3981  * Input:
3982  *      ha:             adapter state pointer.
3983  *      fcport:         port structure list pointer.
3984  *      next_loopid:    contains value of a new loop ID that can be used
3985  *                      by the next login attempt.
3986  *
3987  * Returns:
3988  *      qla2x00 local function return status code.
3989  *
3990  * Context:
3991  *      Kernel context.
3992  */
3993 static int
3994 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3995     uint16_t *next_loopid)
3996 {
3997         int     rval;
3998         uint8_t opts;
3999         struct qla_hw_data *ha = vha->hw;
4000
4001         rval = QLA_SUCCESS;
4002
4003         if (IS_ALOGIO_CAPABLE(ha)) {
4004                 if (fcport->flags & FCF_ASYNC_SENT)
4005                         return rval;
4006                 fcport->flags |= FCF_ASYNC_SENT;
4007                 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
4008                 if (!rval)
4009                         return rval;
4010         }
4011
4012         fcport->flags &= ~FCF_ASYNC_SENT;
4013         rval = qla2x00_fabric_login(vha, fcport, next_loopid);
4014         if (rval == QLA_SUCCESS) {
4015                 /* Send an ADISC to FCP2 devices.*/
4016                 opts = 0;
4017                 if (fcport->flags & FCF_FCP2_DEVICE)
4018                         opts |= BIT_1;
4019                 rval = qla2x00_get_port_database(vha, fcport, opts);
4020                 if (rval != QLA_SUCCESS) {
4021                         ha->isp_ops->fabric_logout(vha, fcport->loop_id,
4022                             fcport->d_id.b.domain, fcport->d_id.b.area,
4023                             fcport->d_id.b.al_pa);
4024                         qla2x00_mark_device_lost(vha, fcport, 1, 0);
4025                 } else {
4026                         qla2x00_update_fcport(vha, fcport);
4027                 }
4028         } else {
4029                 /* Retry Login. */
4030                 qla2x00_mark_device_lost(vha, fcport, 1, 0);
4031         }
4032
4033         return (rval);
4034 }
4035
4036 /*
4037  * qla2x00_fabric_login
4038  *      Issue fabric login command.
4039  *
4040  * Input:
4041  *      ha = adapter block pointer.
4042  *      device = pointer to FC device type structure.
4043  *
4044  * Returns:
4045  *      0 - Login successfully
4046  *      1 - Login failed
4047  *      2 - Initiator device
4048  *      3 - Fatal error
4049  */
4050 int
4051 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
4052     uint16_t *next_loopid)
4053 {
4054         int     rval;
4055         int     retry;
4056         uint16_t tmp_loopid;
4057         uint16_t mb[MAILBOX_REGISTER_COUNT];
4058         struct qla_hw_data *ha = vha->hw;
4059
4060         retry = 0;
4061         tmp_loopid = 0;
4062
4063         for (;;) {
4064                 ql_dbg(ql_dbg_disc, vha, 0x2000,
4065                     "Trying Fabric Login w/loop id 0x%04x for port "
4066                     "%02x%02x%02x.\n",
4067                     fcport->loop_id, fcport->d_id.b.domain,
4068                     fcport->d_id.b.area, fcport->d_id.b.al_pa);
4069
4070                 /* Login fcport on switch. */
4071                 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
4072                     fcport->d_id.b.domain, fcport->d_id.b.area,
4073                     fcport->d_id.b.al_pa, mb, BIT_0);
4074                 if (rval != QLA_SUCCESS) {
4075                         return rval;
4076                 }
4077                 if (mb[0] == MBS_PORT_ID_USED) {
4078                         /*
4079                          * Device has another loop ID.  The firmware team
4080                          * recommends the driver perform an implicit login with
4081                          * the specified ID again. The ID we just used is saved
4082                          * here so we return with an ID that can be tried by
4083                          * the next login.
4084                          */
4085                         retry++;
4086                         tmp_loopid = fcport->loop_id;
4087                         fcport->loop_id = mb[1];
4088
4089                         ql_dbg(ql_dbg_disc, vha, 0x2001,
4090                             "Fabric Login: port in use - next loop "
4091                             "id=0x%04x, port id= %02x%02x%02x.\n",
4092                             fcport->loop_id, fcport->d_id.b.domain,
4093                             fcport->d_id.b.area, fcport->d_id.b.al_pa);
4094
4095                 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
4096                         /*
4097                          * Login succeeded.
4098                          */
4099                         if (retry) {
4100                                 /* A retry occurred before. */
4101                                 *next_loopid = tmp_loopid;
4102                         } else {
4103                                 /*
4104                                  * No retry occurred before. Just increment the
4105                                  * ID value for next login.
4106                                  */
4107                                 *next_loopid = (fcport->loop_id + 1);
4108                         }
4109
4110                         if (mb[1] & BIT_0) {
4111                                 fcport->port_type = FCT_INITIATOR;
4112                         } else {
4113                                 fcport->port_type = FCT_TARGET;
4114                                 if (mb[1] & BIT_1) {
4115                                         fcport->flags |= FCF_FCP2_DEVICE;
4116                                 }
4117                         }
4118
4119                         if (mb[10] & BIT_0)
4120                                 fcport->supported_classes |= FC_COS_CLASS2;
4121                         if (mb[10] & BIT_1)
4122                                 fcport->supported_classes |= FC_COS_CLASS3;
4123
4124                         if (IS_FWI2_CAPABLE(ha)) {
4125                                 if (mb[10] & BIT_7)
4126                                         fcport->flags |=
4127                                             FCF_CONF_COMP_SUPPORTED;
4128                         }
4129
4130                         rval = QLA_SUCCESS;
4131                         break;
4132                 } else if (mb[0] == MBS_LOOP_ID_USED) {
4133                         /*
4134                          * Loop ID already used, try next loop ID.
4135                          */
4136                         fcport->loop_id++;
4137                         rval = qla2x00_find_new_loop_id(vha, fcport);
4138                         if (rval != QLA_SUCCESS) {
4139                                 /* Ran out of loop IDs to use */
4140                                 break;
4141                         }
4142                 } else if (mb[0] == MBS_COMMAND_ERROR) {
4143                         /*
4144                          * Firmware possibly timed out during login. If no
4145                          * retries are left to do, then the device is declared
4146                          * dead.
4147                          */
4148                         *next_loopid = fcport->loop_id;
4149                         ha->isp_ops->fabric_logout(vha, fcport->loop_id,
4150                             fcport->d_id.b.domain, fcport->d_id.b.area,
4151                             fcport->d_id.b.al_pa);
4152                         qla2x00_mark_device_lost(vha, fcport, 1, 0);
4153
4154                         rval = 1;
4155                         break;
4156                 } else {
4157                         /*
4158                          * Unrecoverable / unhandled error.
4159                          */
4160                         ql_dbg(ql_dbg_disc, vha, 0x2002,
4161                             "Failed=%x port_id=%02x%02x%02x loop_id=%x "
4162                             "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
4163                             fcport->d_id.b.area, fcport->d_id.b.al_pa,
4164                             fcport->loop_id, jiffies);
4165
4166                         *next_loopid = fcport->loop_id;
4167                         ha->isp_ops->fabric_logout(vha, fcport->loop_id,
4168                             fcport->d_id.b.domain, fcport->d_id.b.area,
4169                             fcport->d_id.b.al_pa);
4170                         qla2x00_clear_loop_id(fcport);
4171                         fcport->login_retry = 0;
4172
4173                         rval = 3;
4174                         break;
4175                 }
4176         }
4177
4178         return (rval);
4179 }
4180
4181 /*
4182  * qla2x00_local_device_login
4183  *      Issue local device login command.
4184  *
4185  * Input:
4186  *      ha = adapter block pointer.
4187  *      loop_id = loop id of device to login to.
4188  *
4189  * Returns (note: no #define exists for these values):
4190  *      0 - Login successful
4191  *      1 - Login failed
4192  *      3 - Fatal error
4193  */
4194 int
4195 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
4196 {
4197         int             rval;
4198         uint16_t        mb[MAILBOX_REGISTER_COUNT];
4199
4200         memset(mb, 0, sizeof(mb));
4201         rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
4202         if (rval == QLA_SUCCESS) {
4203                 /* Interrogate mailbox registers for any errors */
4204                 if (mb[0] == MBS_COMMAND_ERROR)
4205                         rval = 1;
4206                 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
4207                         /* device not in PCB table */
4208                         rval = 3;
4209         }
4210
4211         return (rval);
4212 }
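
/*
 * Hedged usage sketch (not built): qla2x00_local_device_login() returns the
 * bare values documented above (0/1/3) rather than QLA_* codes, so a caller
 * has to map them itself.  The helper name and its body below are
 * illustrative only, not part of this driver.
 */
#if 0
static void example_local_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	switch (qla2x00_local_device_login(vha, fcport)) {
	case 0:		/* login succeeded */
		break;
	case 1:		/* MBS_COMMAND_ERROR: login failed, may be retried */
		break;
	case 3:		/* MBS_COMMAND_PARAMETER_ERROR: device not in PCB table */
		break;
	}
}
#endif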
4213
4214 /*
4215  *  qla2x00_loop_resync
4216  *      Resync with fibre channel devices.
4217  *
4218  * Input:
4219  *      ha = adapter block pointer.
4220  *
4221  * Returns:
4222  *      0 = success
4223  */
4224 int
4225 qla2x00_loop_resync(scsi_qla_host_t *vha)
4226 {
4227         int rval = QLA_SUCCESS;
4228         uint32_t wait_time;
4229         struct req_que *req;
4230         struct rsp_que *rsp;
4231
4232         if (vha->hw->flags.cpu_affinity_enabled)
4233                 req = vha->hw->req_q_map[0];
4234         else
4235                 req = vha->req;
4236         rsp = req->rsp;
4237
4238         clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4239         if (vha->flags.online) {
4240                 if (!(rval = qla2x00_fw_ready(vha))) {
4241                         /* Wait at most MAX_TARGET RSCNs for a stable link. */
4242                         wait_time = 256;
4243                         do {
4244                                 if (!IS_QLAFX00(vha->hw)) {
4245                                         /*
4246                                          * Issue a marker after FW becomes
4247                                          * ready.
4248                                          */
4249                                         qla2x00_marker(vha, req, rsp, 0, 0,
4250                                                 MK_SYNC_ALL);
4251                                         vha->marker_needed = 0;
4252                                 }
4253
4254                                 /* Remap devices on Loop. */
4255                                 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4256
4257                                 if (IS_QLAFX00(vha->hw))
4258                                         qlafx00_configure_devices(vha);
4259                                 else
4260                                         qla2x00_configure_loop(vha);
4261
4262                                 wait_time--;
4263                         } while (!atomic_read(&vha->loop_down_timer) &&
4264                                 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4265                                 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4266                                 &vha->dpc_flags)));
4267                 }
4268         }
4269
4270         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4271                 return (QLA_FUNCTION_FAILED);
4272
4273         if (rval)
4274                 ql_dbg(ql_dbg_disc, vha, 0x206c,
4275                     "%s *** FAILED ***.\n", __func__);
4276
4277         return (rval);
4278 }
4279
4280 /*
4281 * qla2x00_perform_loop_resync
4282 * Description: This function will set the appropriate flags and call
4283 *              qla2x00_loop_resync. If successful, the loop will be resynced
4284 * Arguments : scsi_qla_host_t pointer
4285 * return    : Success or Failure
4286 */
4287
4288 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
4289 {
4290         int32_t rval = 0;
4291
4292         if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
4293                 /*Configure the flags so that resync happens properly*/
4294                 atomic_set(&ha->loop_down_timer, 0);
4295                 if (!(ha->device_flags & DFLG_NO_CABLE)) {
4296                         atomic_set(&ha->loop_state, LOOP_UP);
4297                         set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
4298                         set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
4299                         set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
4300
4301                         rval = qla2x00_loop_resync(ha);
4302                 } else
4303                         atomic_set(&ha->loop_state, LOOP_DEAD);
4304
4305                 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
4306         }
4307
4308         return rval;
4309 }
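
/*
 * Hedged usage sketch (not built): a hypothetical caller only needs the single
 * entry point above; the LOOP_RESYNC_ACTIVE/LOOP_RESYNC_NEEDED flag setup is
 * handled internally.  "base_vha" and the message id are illustrative only.
 */
#if 0
	if (qla2x00_perform_loop_resync(base_vha) != QLA_SUCCESS)
		ql_log(ql_log_warn, base_vha, 0x0,
		    "Loop resync did not complete.\n");
#endif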
4310
4311 void
4312 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4313 {
4314         fc_port_t *fcport;
4315         struct scsi_qla_host *vha;
4316         struct qla_hw_data *ha = base_vha->hw;
4317         unsigned long flags;
4318
4319         spin_lock_irqsave(&ha->vport_slock, flags);
4320         /* Go with deferred removal of rport references. */
4321         list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
4322                 atomic_inc(&vha->vref_count);
4323                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4324                         if (fcport->drport &&
4325                             atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4326                                 spin_unlock_irqrestore(&ha->vport_slock, flags);
4327                                 qla2x00_rport_del(fcport);
4328
4329                                 /*
4330                                  * Release the target mode FC NEXUS in
4331                                  * qla_target.c, if target mod is enabled.
4332                                  * qla_target.c, if target mode is enabled.
4333                                 qlt_fc_port_deleted(vha, fcport,
4334                                     base_vha->total_fcport_update_gen);
4335
4336                                 spin_lock_irqsave(&ha->vport_slock, flags);
4337                         }
4338                 }
4339                 atomic_dec(&vha->vref_count);
4340         }
4341         spin_unlock_irqrestore(&ha->vport_slock, flags);
4342 }
4343
4344 /* Assumes idc_lock always held on entry */
4345 void
4346 qla83xx_reset_ownership(scsi_qla_host_t *vha)
4347 {
4348         struct qla_hw_data *ha = vha->hw;
4349         uint32_t drv_presence, drv_presence_mask;
4350         uint32_t dev_part_info1, dev_part_info2, class_type;
4351         uint32_t class_type_mask = 0x3;
4352         uint16_t fcoe_other_function = 0xffff, i;
4353
4354         if (IS_QLA8044(ha)) {
4355                 drv_presence = qla8044_rd_direct(vha,
4356                     QLA8044_CRB_DRV_ACTIVE_INDEX);
4357                 dev_part_info1 = qla8044_rd_direct(vha,
4358                     QLA8044_CRB_DEV_PART_INFO_INDEX);
4359                 dev_part_info2 = qla8044_rd_direct(vha,
4360                     QLA8044_CRB_DEV_PART_INFO2);
4361         } else {
4362                 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4363                 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
4364                 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
4365         }
4366         for (i = 0; i < 8; i++) {
4367                 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
4368                 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4369                     (i != ha->portnum)) {
4370                         fcoe_other_function = i;
4371                         break;
4372                 }
4373         }
4374         if (fcoe_other_function == 0xffff) {
4375                 for (i = 0; i < 8; i++) {
4376                         class_type = ((dev_part_info2 >> (i * 4)) &
4377                             class_type_mask);
4378                         if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4379                             ((i + 8) != ha->portnum)) {
4380                                 fcoe_other_function = i + 8;
4381                                 break;
4382                         }
4383                 }
4384         }
4385         /*
4386          * Prepare drv-presence mask based on fcoe functions present.
4387          * However, consider only valid physical fcoe function numbers (0-15).
4388          */
4389         drv_presence_mask = ~((1 << (ha->portnum)) |
4390                         ((fcoe_other_function == 0xffff) ?
4391                          0 : (1 << (fcoe_other_function))));
4392
4393         /* We are the reset owner iff:
4394          *    - No other protocol drivers present.
4395          *    - This is the lowest among fcoe functions. */
4396         if (!(drv_presence & drv_presence_mask) &&
4397                         (ha->portnum < fcoe_other_function)) {
4398                 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
4399                     "This host is Reset owner.\n");
4400                 ha->flags.nic_core_reset_owner = 1;
4401         }
4402 }
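
/*
 * Worked example of the ownership check above (illustrative numbers): with
 * ha->portnum == 2 and the other FCoE function found at 6,
 * drv_presence_mask == ~((1 << 2) | (1 << 6)) == ~0x44.  If drv_presence has
 * no bits set outside 0x44 and 2 < 6, this function becomes the reset owner.
 */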
4403
4404 static int
4405 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
4406 {
4407         int rval = QLA_SUCCESS;
4408         struct qla_hw_data *ha = vha->hw;
4409         uint32_t drv_ack;
4410
4411         rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4412         if (rval == QLA_SUCCESS) {
4413                 drv_ack |= (1 << ha->portnum);
4414                 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4415         }
4416
4417         return rval;
4418 }
4419
4420 static int
4421 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
4422 {
4423         int rval = QLA_SUCCESS;
4424         struct qla_hw_data *ha = vha->hw;
4425         uint32_t drv_ack;
4426
4427         rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4428         if (rval == QLA_SUCCESS) {
4429                 drv_ack &= ~(1 << ha->portnum);
4430                 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4431         }
4432
4433         return rval;
4434 }
4435
4436 static const char *
4437 qla83xx_dev_state_to_string(uint32_t dev_state)
4438 {
4439         switch (dev_state) {
4440         case QLA8XXX_DEV_COLD:
4441                 return "COLD/RE-INIT";
4442         case QLA8XXX_DEV_INITIALIZING:
4443                 return "INITIALIZING";
4444         case QLA8XXX_DEV_READY:
4445                 return "READY";
4446         case QLA8XXX_DEV_NEED_RESET:
4447                 return "NEED RESET";
4448         case QLA8XXX_DEV_NEED_QUIESCENT:
4449                 return "NEED QUIESCENT";
4450         case QLA8XXX_DEV_FAILED:
4451                 return "FAILED";
4452         case QLA8XXX_DEV_QUIESCENT:
4453                 return "QUIESCENT";
4454         default:
4455                 return "Unknown";
4456         }
4457 }
4458
4459 /* Assumes idc-lock always held on entry */
4460 void
4461 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
4462 {
4463         struct qla_hw_data *ha = vha->hw;
4464         uint32_t idc_audit_reg = 0, duration_secs = 0;
4465
4466         switch (audit_type) {
4467         case IDC_AUDIT_TIMESTAMP:
4468                 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
4469                 idc_audit_reg = (ha->portnum) |
4470                     (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
4471                 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4472                 break;
4473
4474         case IDC_AUDIT_COMPLETION:
4475                 duration_secs = ((jiffies_to_msecs(jiffies) -
4476                     jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
4477                 idc_audit_reg = (ha->portnum) |
4478                     (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
4479                 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4480                 break;
4481
4482         default:
4483                 ql_log(ql_log_warn, vha, 0xb078,
4484                     "Invalid audit type specified.\n");
4485                 break;
4486         }
4487 }
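
/*
 * Worked example of the audit-register packing above (IDC_AUDIT_* values left
 * symbolic): port 3 reporting a completion after 42 seconds writes
 *	3 | (IDC_AUDIT_COMPLETION << 7) | (42 << 8)
 * i.e. the port number in the low bits, the audit type at bit 7, and the
 * timestamp or duration in seconds from bit 8 upward.
 */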
4488
4489 /* Assumes idc_lock always held on entry */
4490 static int
4491 qla83xx_initiating_reset(scsi_qla_host_t *vha)
4492 {
4493         struct qla_hw_data *ha = vha->hw;
4494         uint32_t  idc_control, dev_state;
4495
4496         __qla83xx_get_idc_control(vha, &idc_control);
4497         if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
4498                 ql_log(ql_log_info, vha, 0xb080,
4499                     "NIC Core reset has been disabled. idc-control=0x%x\n",
4500                     idc_control);
4501                 return QLA_FUNCTION_FAILED;
4502         }
4503
4504         /* Set NEED-RESET iff in READY state and we are the reset-owner */
4505         qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4506         if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
4507                 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
4508                     QLA8XXX_DEV_NEED_RESET);
4509                 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
4510                 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
4511         } else {
4512                 const char *state = qla83xx_dev_state_to_string(dev_state);
4513                 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
4514
4515                 /* SV: XXX: Is timeout required here? */
4516                 /* Wait for IDC state change READY -> NEED_RESET */
4517                 while (dev_state == QLA8XXX_DEV_READY) {
4518                         qla83xx_idc_unlock(vha, 0);
4519                         msleep(200);
4520                         qla83xx_idc_lock(vha, 0);
4521                         qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4522                 }
4523         }
4524
4525         /* Send IDC ack by writing to drv-ack register */
4526         __qla83xx_set_drv_ack(vha);
4527
4528         return QLA_SUCCESS;
4529 }
4530
4531 int
4532 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4533 {
4534         return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4535 }
4536
4537 int
4538 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4539 {
4540         return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4541 }
4542
4543 static int
4544 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
4545 {
4546         uint32_t drv_presence = 0;
4547         struct qla_hw_data *ha = vha->hw;
4548
4549         qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4550         if (drv_presence & (1 << ha->portnum))
4551                 return QLA_SUCCESS;
4552         else
4553                 return QLA_TEST_FAILED;
4554 }
4555
4556 int
4557 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
4558 {
4559         int rval = QLA_SUCCESS;
4560         struct qla_hw_data *ha = vha->hw;
4561
4562         ql_dbg(ql_dbg_p3p, vha, 0xb058,
4563             "Entered  %s().\n", __func__);
4564
4565         if (vha->device_flags & DFLG_DEV_FAILED) {
4566                 ql_log(ql_log_warn, vha, 0xb059,
4567                     "Device in unrecoverable FAILED state.\n");
4568                 return QLA_FUNCTION_FAILED;
4569         }
4570
4571         qla83xx_idc_lock(vha, 0);
4572
4573         if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
4574                 ql_log(ql_log_warn, vha, 0xb05a,
4575                     "Function=0x%x has been removed from IDC participation.\n",
4576                     ha->portnum);
4577                 rval = QLA_FUNCTION_FAILED;
4578                 goto exit;
4579         }
4580
4581         qla83xx_reset_ownership(vha);
4582
4583         rval = qla83xx_initiating_reset(vha);
4584
4585         /*
4586          * Perform reset if we are the reset-owner,
4587          * else wait till IDC state changes to READY/FAILED.
4588          */
4589         if (rval == QLA_SUCCESS) {
4590                 rval = qla83xx_idc_state_handler(vha);
4591
4592                 if (rval == QLA_SUCCESS)
4593                         ha->flags.nic_core_hung = 0;
4594                 __qla83xx_clear_drv_ack(vha);
4595         }
4596
4597 exit:
4598         qla83xx_idc_unlock(vha, 0);
4599
4600         ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
4601
4602         return rval;
4603 }
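
/*
 * Hedged usage sketch (not built): qla83xx_nic_core_reset() acquires and
 * releases the IDC lock internally, so a hypothetical caller only checks the
 * QLA_* result.  "base_vha" and the message id are illustrative only.
 */
#if 0
	if (qla83xx_nic_core_reset(base_vha) != QLA_SUCCESS)
		ql_log(ql_log_warn, base_vha, 0x0,
		    "NIC core reset was not performed.\n");
#endif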
4604
4605 int
4606 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
4607 {
4608         struct qla_hw_data *ha = vha->hw;
4609         int rval = QLA_FUNCTION_FAILED;
4610
4611         if (!IS_MCTP_CAPABLE(ha)) {
4612                 /* This message can be removed from the final version */
4613                 ql_log(ql_log_info, vha, 0x506d,
4614                     "This board is not MCTP capable\n");
4615                 return rval;
4616         }
4617
4618         if (!ha->mctp_dump) {
4619                 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
4620                     MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
4621
4622                 if (!ha->mctp_dump) {
4623                         ql_log(ql_log_warn, vha, 0x506e,
4624                             "Failed to allocate memory for mctp dump\n");
4625                         return rval;
4626                 }
4627         }
4628
4629 #define MCTP_DUMP_STR_ADDR      0x00000000
4630         rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
4631             MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
4632         if (rval != QLA_SUCCESS) {
4633                 ql_log(ql_log_warn, vha, 0x506f,
4634                     "Failed to capture mctp dump\n");
4635         } else {
4636                 ql_log(ql_log_info, vha, 0x5070,
4637                     "Mctp dump capture for host (%ld/%p).\n",
4638                     vha->host_no, ha->mctp_dump);
4639                 ha->mctp_dumped = 1;
4640         }
4641
4642         if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
4643                 ha->flags.nic_core_reset_hdlr_active = 1;
4644                 rval = qla83xx_restart_nic_firmware(vha);
4645                 if (rval)
4646                         /* NIC Core reset failed. */
4647                         ql_log(ql_log_warn, vha, 0x5071,
4648                             "Failed to restart nic firmware\n");
4649                 else
4650                         ql_dbg(ql_dbg_p3p, vha, 0xb084,
4651                             "Restarted NIC firmware successfully.\n");
4652                 ha->flags.nic_core_reset_hdlr_active = 0;
4653         }
4654
4655         return rval;
4656
4657 }
4658
4659 /*
4660 * qla2x00_quiesce_io
4661 * Description: This function will block the new I/Os
4662 *              Its not aborting any I/Os as context
4663 *              is not destroyed during quiescence
4664 * Arguments: scsi_qla_host_t
4665 * return   : void
4666 */
4667 void
4668 qla2x00_quiesce_io(scsi_qla_host_t *vha)
4669 {
4670         struct qla_hw_data *ha = vha->hw;
4671         struct scsi_qla_host *vp;
4672
4673         ql_dbg(ql_dbg_dpc, vha, 0x401d,
4674             "Quiescing I/O - ha=%p.\n", ha);
4675
4676         atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
4677         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4678                 atomic_set(&vha->loop_state, LOOP_DOWN);
4679                 qla2x00_mark_all_devices_lost(vha, 0);
4680                 list_for_each_entry(vp, &ha->vp_list, list)
4681                         qla2x00_mark_all_devices_lost(vp, 0);
4682         } else {
4683                 if (!atomic_read(&vha->loop_down_timer))
4684                         atomic_set(&vha->loop_down_timer,
4685                                         LOOP_DOWN_TIME);
4686         }
4687         /* Wait for pending cmds to complete */
4688         qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
4689 }
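
/*
 * Hedged usage sketch (not built): qla2x00_quiesce_io() returns only after
 * qla2x00_eh_wait_for_pending_commands() has drained outstanding commands, so
 * a hypothetical caller may start the quiescent handling right after it.
 * "base_vha" is an illustrative variable name.
 */
#if 0
	qla2x00_quiesce_io(base_vha);	/* blocks new I/O, waits for pending I/O */
#endif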
4690
4691 void
4692 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4693 {
4694         struct qla_hw_data *ha = vha->hw;
4695         struct scsi_qla_host *vp;
4696         unsigned long flags;
4697         fc_port_t *fcport;
4698
4699         /* For ISP82XX, the driver waits for completion of the commands,
4700          * so the online flag should remain set.
4701          */
4702         if (!(IS_P3P_TYPE(ha)))
4703                 vha->flags.online = 0;
4704         ha->flags.chip_reset_done = 0;
4705         clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4706         vha->qla_stats.total_isp_aborts++;
4707
4708         ql_log(ql_log_info, vha, 0x00af,
4709             "Performing ISP error recovery - ha=%p.\n", ha);
4710
4711         /* For ISP82XX, reset_chip is just disabling interrupts.
4712          * The driver waits for the completion of the commands, so
4713          * the interrupts need to remain enabled.
4714          */
4715         if (!(IS_P3P_TYPE(ha)))
4716                 ha->isp_ops->reset_chip(vha);
4717
4718         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
4719         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4720                 atomic_set(&vha->loop_state, LOOP_DOWN);
4721                 qla2x00_mark_all_devices_lost(vha, 0);
4722
4723                 spin_lock_irqsave(&ha->vport_slock, flags);
4724                 list_for_each_entry(vp, &ha->vp_list, list) {
4725                         atomic_inc(&vp->vref_count);
4726                         spin_unlock_irqrestore(&ha->vport_slock, flags);
4727
4728                         qla2x00_mark_all_devices_lost(vp, 0);
4729
4730                         spin_lock_irqsave(&ha->vport_slock, flags);
4731                         atomic_dec(&vp->vref_count);
4732                 }
4733                 spin_unlock_irqrestore(&ha->vport_slock, flags);
4734         } else {
4735                 if (!atomic_read(&vha->loop_down_timer))
4736                         atomic_set(&vha->loop_down_timer,
4737                             LOOP_DOWN_TIME);
4738         }
4739
4740         /* Clear all async request states across all VPs. */
4741         list_for_each_entry(fcport, &vha->vp_fcports, list)
4742                 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4743         spin_lock_irqsave(&ha->vport_slock, flags);
4744         list_for_each_entry(vp, &ha->vp_list, list) {
4745                 atomic_inc(&vp->vref_count);
4746                 spin_unlock_irqrestore(&ha->vport_slock, flags);
4747
4748                 list_for_each_entry(fcport, &vp->vp_fcports, list)
4749                         fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4750
4751                 spin_lock_irqsave(&ha->vport_slock, flags);
4752                 atomic_dec(&vp->vref_count);
4753         }
4754         spin_unlock_irqrestore(&ha->vport_slock, flags);
4755
4756         if (!ha->flags.eeh_busy) {
4757                 /* Make sure for ISP 82XX IO DMA is complete */
4758                 if (IS_P3P_TYPE(ha)) {
4759                         qla82xx_chip_reset_cleanup(vha);
4760                         ql_log(ql_log_info, vha, 0x00b4,
4761                             "Done chip reset cleanup.\n");
4762
4763                         /* Done waiting for pending commands.
4764                          * Reset the online flag.
4765                          */
4766                         vha->flags.online = 0;
4767                 }
4768
4769                 /* Requeue all commands in outstanding command list. */
4770                 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4771         }
4772
4773         ha->chip_reset++;
4774         /* memory barrier */
4775         wmb();
4776 }
4777
4778 /*
4779 *  qla2x00_abort_isp
4780 *      Resets ISP and aborts all outstanding commands.
4781 *
4782 * Input:
4783 *      ha           = adapter block pointer.
4784 *
4785 * Returns:
4786 *      0 = success
4787 */
4788 int
4789 qla2x00_abort_isp(scsi_qla_host_t *vha)
4790 {
4791         int rval;
4792         uint8_t        status = 0;
4793         struct qla_hw_data *ha = vha->hw;
4794         struct scsi_qla_host *vp;
4795         struct req_que *req = ha->req_q_map[0];
4796         unsigned long flags;
4797
4798         if (vha->flags.online) {
4799                 qla2x00_abort_isp_cleanup(vha);
4800
4801                 if (IS_QLA8031(ha)) {
4802                         ql_dbg(ql_dbg_p3p, vha, 0xb05c,
4803                             "Clearing fcoe driver presence.\n");
4804                         if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
4805                                 ql_dbg(ql_dbg_p3p, vha, 0xb073,
4806                                     "Error while clearing DRV-Presence.\n");
4807                 }
4808
4809                 if (unlikely(pci_channel_offline(ha->pdev) &&
4810                     ha->flags.pci_channel_io_perm_failure)) {
4811                         clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4812                         status = 0;
4813                         return status;
4814                 }
4815
4816                 ha->isp_ops->get_flash_version(vha, req->ring);
4817
4818                 ha->isp_ops->nvram_config(vha);
4819
4820                 if (!qla2x00_restart_isp(vha)) {
4821                         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4822
4823                         if (!atomic_read(&vha->loop_down_timer)) {
4824                                 /*
4825                                  * Issue marker command only when we are going
4826                                  * to start the I/O.
4827                                  */
4828                                 vha->marker_needed = 1;
4829                         }
4830
4831                         vha->flags.online = 1;
4832
4833                         ha->isp_ops->enable_intrs(ha);
4834
4835                         ha->isp_abort_cnt = 0;
4836                         clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4837
4838                         if (IS_QLA81XX(ha) || IS_QLA8031(ha))
4839                                 qla2x00_get_fw_version(vha);
4840                         if (ha->fce) {
4841                                 ha->flags.fce_enabled = 1;
4842                                 memset(ha->fce, 0,
4843                                     fce_calc_size(ha->fce_bufs));
4844                                 rval = qla2x00_enable_fce_trace(vha,
4845                                     ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4846                                     &ha->fce_bufs);
4847                                 if (rval) {
4848                                         ql_log(ql_log_warn, vha, 0x8033,
4849                                             "Unable to reinitialize FCE "
4850                                             "(%d).\n", rval);
4851                                         ha->flags.fce_enabled = 0;
4852                                 }
4853                         }
4854
4855                         if (ha->eft) {
4856                                 memset(ha->eft, 0, EFT_SIZE);
4857                                 rval = qla2x00_enable_eft_trace(vha,
4858                                     ha->eft_dma, EFT_NUM_BUFFERS);
4859                                 if (rval) {
4860                                         ql_log(ql_log_warn, vha, 0x8034,
4861                                             "Unable to reinitialize EFT "
4862                                             "(%d).\n", rval);
4863                                 }
4864                         }
4865                 } else {        /* failed the ISP abort */
4866                         vha->flags.online = 1;
4867                         if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4868                                 if (ha->isp_abort_cnt == 0) {
4869                                         ql_log(ql_log_fatal, vha, 0x8035,
4870                                             "ISP error recovery failed - "
4871                                             "board disabled.\n");
4872                                         /*
4873                                          * The next call disables the board
4874                                          * completely.
4875                                          */
4876                                         ha->isp_ops->reset_adapter(vha);
4877                                         vha->flags.online = 0;
4878                                         clear_bit(ISP_ABORT_RETRY,
4879                                             &vha->dpc_flags);
4880                                         status = 0;
4881                                 } else { /* schedule another ISP abort */
4882                                         ha->isp_abort_cnt--;
4883                                         ql_dbg(ql_dbg_taskm, vha, 0x8020,
4884                                             "ISP abort - retry remaining %d.\n",
4885                                             ha->isp_abort_cnt);
4886                                         status = 1;
4887                                 }
4888                         } else {
4889                                 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4890                                 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4891                                     "ISP error recovery - retrying (%d) "
4892                                     "more times.\n", ha->isp_abort_cnt);
4893                                 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4894                                 status = 1;
4895                         }
4896                 }
4897
4898         }
4899
4900         if (!status) {
4901                 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4902
4903                 spin_lock_irqsave(&ha->vport_slock, flags);
4904                 list_for_each_entry(vp, &ha->vp_list, list) {
4905                         if (vp->vp_idx) {
4906                                 atomic_inc(&vp->vref_count);
4907                                 spin_unlock_irqrestore(&ha->vport_slock, flags);
4908
4909                                 qla2x00_vp_abort_isp(vp);
4910
4911                                 spin_lock_irqsave(&ha->vport_slock, flags);
4912                                 atomic_dec(&vp->vref_count);
4913                         }
4914                 }
4915                 spin_unlock_irqrestore(&ha->vport_slock, flags);
4916
4917                 if (IS_QLA8031(ha)) {
4918                         ql_dbg(ql_dbg_p3p, vha, 0xb05d,
4919                             "Setting back fcoe driver presence.\n");
4920                         if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
4921                                 ql_dbg(ql_dbg_p3p, vha, 0xb074,
4922                                     "Error while setting DRV-Presence.\n");
4923                 }
4924         } else {
4925                 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4926                        __func__);
4927         }
4928
4929         return(status);
4930 }
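
/*
 * Hedged usage sketch (not built): qla2x00_abort_isp() returns 0 when no
 * further abort retry is needed (recovery succeeded, or the board was taken
 * offline) and non-zero when ISP_ABORT_RETRY has been left set above.
 * "base_vha" and the message id are illustrative only.
 */
#if 0
	if (qla2x00_abort_isp(base_vha))
		ql_log(ql_log_warn, base_vha, 0x0,
		    "ISP abort incomplete - retry scheduled.\n");
#endif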
4931
4932 /*
4933 *  qla2x00_restart_isp
4934 *      Restarts the ISP after a reset.
4935 *
4936 * Input:
4937 *      ha = adapter block pointer.
4938 *
4939 * Returns:
4940 *      0 = success
4941 */
4942 static int
4943 qla2x00_restart_isp(scsi_qla_host_t *vha)
4944 {
4945         int status = 0;
4946         struct qla_hw_data *ha = vha->hw;
4947         struct req_que *req = ha->req_q_map[0];
4948         struct rsp_que *rsp = ha->rsp_q_map[0];
4949
4950         /* If firmware needs to be loaded */
4951         if (qla2x00_isp_firmware(vha)) {
4952                 vha->flags.online = 0;
4953                 status = ha->isp_ops->chip_diag(vha);
4954                 if (!status)
4955                         status = qla2x00_setup_chip(vha);
4956         }
4957
4958         if (!status && !(status = qla2x00_init_rings(vha))) {
4959                 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4960                 ha->flags.chip_reset_done = 1;
4961
4962                 /* Initialize the queues in use */
4963                 qla25xx_init_queues(ha);
4964
4965                 status = qla2x00_fw_ready(vha);
4966                 if (!status) {
4967                         /* Issue a marker after FW becomes ready. */
4968                         qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4969
4970                         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4971                 }
4972
4973                 /* If there is no cable then assume it's good. */
4974                 if ((vha->device_flags & DFLG_NO_CABLE))
4975                         status = 0;
4976         }
4977         return (status);
4978 }
4979
4980 static int
4981 qla25xx_init_queues(struct qla_hw_data *ha)
4982 {
4983         struct rsp_que *rsp = NULL;
4984         struct req_que *req = NULL;
4985         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4986         int ret = -1;
4987         int i;
4988
4989         for (i = 1; i < ha->max_rsp_queues; i++) {
4990                 rsp = ha->rsp_q_map[i];
4991                 if (rsp && test_bit(i, ha->rsp_qid_map)) {
4992                         rsp->options &= ~BIT_0;
4993                         ret = qla25xx_init_rsp_que(base_vha, rsp);
4994                         if (ret != QLA_SUCCESS)
4995                                 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4996                                     "%s Rsp que: %d init failed.\n",
4997                                     __func__, rsp->id);
4998                         else
4999                                 ql_dbg(ql_dbg_init, base_vha, 0x0100,
5000                                     "%s Rsp que: %d inited.\n",
5001                                     __func__, rsp->id);
5002                 }
5003         }
5004         for (i = 1; i < ha->max_req_queues; i++) {
5005                 req = ha->req_q_map[i];
5006                 if (req && test_bit(i, ha->req_qid_map)) {
5007                         /* Clear outstanding commands array. */
5008                         req->options &= ~BIT_0;
5009                         ret = qla25xx_init_req_que(base_vha, req);
5010                         if (ret != QLA_SUCCESS)
5011                                 ql_dbg(ql_dbg_init, base_vha, 0x0101,
5012                                     "%s Req que: %d init failed.\n",
5013                                     __func__, req->id);
5014                         else
5015                                 ql_dbg(ql_dbg_init, base_vha, 0x0102,
5016                                     "%s Req que: %d inited.\n",
5017                                     __func__, req->id);
5018                 }
5019         }
5020         return ret;
5021 }
5022
5023 /*
5024 * qla2x00_reset_adapter
5025 *      Reset adapter.
5026 *
5027 * Input:
5028 *      ha = adapter block pointer.
5029 */
5030 void
5031 qla2x00_reset_adapter(scsi_qla_host_t *vha)
5032 {
5033         unsigned long flags = 0;
5034         struct qla_hw_data *ha = vha->hw;
5035         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
5036
5037         vha->flags.online = 0;
5038         ha->isp_ops->disable_intrs(ha);
5039
5040         spin_lock_irqsave(&ha->hardware_lock, flags);
5041         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
5042         RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
5043         WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
5044         RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
5045         spin_unlock_irqrestore(&ha->hardware_lock, flags);
5046 }
5047
5048 void
5049 qla24xx_reset_adapter(scsi_qla_host_t *vha)
5050 {
5051         unsigned long flags = 0;
5052         struct qla_hw_data *ha = vha->hw;
5053         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5054
5055         if (IS_P3P_TYPE(ha))
5056                 return;
5057
5058         vha->flags.online = 0;
5059         ha->isp_ops->disable_intrs(ha);
5060
5061         spin_lock_irqsave(&ha->hardware_lock, flags);
5062         WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
5063         RD_REG_DWORD(&reg->hccr);
5064         WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
5065         RD_REG_DWORD(&reg->hccr);
5066         spin_unlock_irqrestore(&ha->hardware_lock, flags);
5067
5068         if (IS_NOPOLLING_TYPE(ha))
5069                 ha->isp_ops->enable_intrs(ha);
5070 }
5071
5072 /* On sparc systems, obtain port and node WWN from firmware
5073  * properties.
5074  */
5075 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
5076         struct nvram_24xx *nv)
5077 {
5078 #ifdef CONFIG_SPARC
5079         struct qla_hw_data *ha = vha->hw;
5080         struct pci_dev *pdev = ha->pdev;
5081         struct device_node *dp = pci_device_to_OF_node(pdev);
5082         const u8 *val;
5083         int len;
5084
5085         val = of_get_property(dp, "port-wwn", &len);
5086         if (val && len >= WWN_SIZE)
5087                 memcpy(nv->port_name, val, WWN_SIZE);
5088
5089         val = of_get_property(dp, "node-wwn", &len);
5090         if (val && len >= WWN_SIZE)
5091                 memcpy(nv->node_name, val, WWN_SIZE);
5092 #endif
5093 }
5094
5095 int
5096 qla24xx_nvram_config(scsi_qla_host_t *vha)
5097 {
5098         int   rval;
5099         struct init_cb_24xx *icb;
5100         struct nvram_24xx *nv;
5101         uint32_t *dptr;
5102         uint8_t  *dptr1, *dptr2;
5103         uint32_t chksum;
5104         uint16_t cnt;
5105         struct qla_hw_data *ha = vha->hw;
5106
5107         rval = QLA_SUCCESS;
5108         icb = (struct init_cb_24xx *)ha->init_cb;
5109         nv = ha->nvram;
5110
5111         /* Determine NVRAM starting address. */
5112         if (ha->port_no == 0) {
5113                 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
5114                 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
5115         } else {
5116                 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
5117                 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
5118         }
5119
5120         ha->nvram_size = sizeof(struct nvram_24xx);
5121         ha->vpd_size = FA_NVRAM_VPD_SIZE;
5122
5123         /* Get VPD data into cache */
5124         ha->vpd = ha->nvram + VPD_OFFSET;
5125         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
5126             ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
5127
5128         /* Get NVRAM data into cache and calculate checksum. */
5129         dptr = (uint32_t *)nv;
5130         ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
5131             ha->nvram_size);
5132         for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
5133                 chksum += le32_to_cpu(*dptr);
5134
5135         ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
5136             "Contents of NVRAM\n");
5137         ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
5138             (uint8_t *)nv, ha->nvram_size);
5139
5140         /* Bad NVRAM data, set default parameters. */
5141         if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5142             || nv->id[3] != ' ' ||
5143             nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
5144                 /* Reset NVRAM data. */
5145                 ql_log(ql_log_warn, vha, 0x006b,
5146                     "Inconsistent NVRAM detected: checksum=0x%x id=%c "
5147                     "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
5148                 ql_log(ql_log_warn, vha, 0x006c,
5149                     "Falling back to functioning (yet invalid -- WWPN) "
5150                     "defaults.\n");
5151
5152                 /*
5153                  * Set default initialization control block.
5154                  */
5155                 memset(nv, 0, ha->nvram_size);
5156                 nv->nvram_version = cpu_to_le16(ICB_VERSION);
5157                 nv->version = cpu_to_le16(ICB_VERSION);
5158                 nv->frame_payload_size = 2048;
5159                 nv->execution_throttle = cpu_to_le16(0xFFFF);
5160                 nv->exchange_count = cpu_to_le16(0);
5161                 nv->hard_address = cpu_to_le16(124);
5162                 nv->port_name[0] = 0x21;
5163                 nv->port_name[1] = 0x00 + ha->port_no + 1;
5164                 nv->port_name[2] = 0x00;
5165                 nv->port_name[3] = 0xe0;
5166                 nv->port_name[4] = 0x8b;
5167                 nv->port_name[5] = 0x1c;
5168                 nv->port_name[6] = 0x55;
5169                 nv->port_name[7] = 0x86;
5170                 nv->node_name[0] = 0x20;
5171                 nv->node_name[1] = 0x00;
5172                 nv->node_name[2] = 0x00;
5173                 nv->node_name[3] = 0xe0;
5174                 nv->node_name[4] = 0x8b;
5175                 nv->node_name[5] = 0x1c;
5176                 nv->node_name[6] = 0x55;
5177                 nv->node_name[7] = 0x86;
5178                 qla24xx_nvram_wwn_from_ofw(vha, nv);
5179                 nv->login_retry_count = cpu_to_le16(8);
5180                 nv->interrupt_delay_timer = cpu_to_le16(0);
5181                 nv->login_timeout = cpu_to_le16(0);
5182                 nv->firmware_options_1 =
5183                     cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5184                 nv->firmware_options_2 = cpu_to_le32(2 << 4);
5185                 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5186                 nv->firmware_options_3 = cpu_to_le32(2 << 13);
5187                 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
5188                 nv->efi_parameters = cpu_to_le32(0);
5189                 nv->reset_delay = 5;
5190                 nv->max_luns_per_target = cpu_to_le16(128);
5191                 nv->port_down_retry_count = cpu_to_le16(30);
5192                 nv->link_down_timeout = cpu_to_le16(30);
5193
5194                 rval = 1;
5195         }
5196
5197         if (!qla_ini_mode_enabled(vha)) {
5198                 /* Don't enable full login after initial LIP */
5199                 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
5200                 /* Don't enable LIP full login for initiator */
5201                 nv->host_p &= cpu_to_le32(~BIT_10);
5202         }
5203
5204         qlt_24xx_config_nvram_stage1(vha, nv);
5205
5206         /* Reset Initialization control block */
5207         memset(icb, 0, ha->init_cb_size);
5208
5209         /* Copy 1st segment. */
5210         dptr1 = (uint8_t *)icb;
5211         dptr2 = (uint8_t *)&nv->version;
5212         cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5213         while (cnt--)
5214                 *dptr1++ = *dptr2++;
5215
5216         icb->login_retry_count = nv->login_retry_count;
5217         icb->link_down_on_nos = nv->link_down_on_nos;
5218
5219         /* Copy 2nd segment. */
5220         dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5221         dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5222         cnt = (uint8_t *)&icb->reserved_3 -
5223             (uint8_t *)&icb->interrupt_delay_timer;
5224         while (cnt--)
5225                 *dptr1++ = *dptr2++;
5226
5227         /*
5228          * Setup driver NVRAM options.
5229          */
5230         qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5231             "QLA2462");
5232
5233         qlt_24xx_config_nvram_stage2(vha, icb);
5234
5235         if (nv->host_p & cpu_to_le32(BIT_15)) {
5236                 /* Use alternate WWN? */
5237                 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5238                 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5239         }
5240
5241         /* Prepare nodename */
5242         if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
5243                 /*
5244                  * Firmware will apply the following mask if the nodename was
5245                  * not provided.
5246                  */
5247                 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5248                 icb->node_name[0] &= 0xF0;
5249         }
5250
5251         /* Set host adapter parameters. */
5252         ha->flags.disable_risc_code_load = 0;
5253         ha->flags.enable_lip_reset = 0;
5254         ha->flags.enable_lip_full_login =
5255             le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5256         ha->flags.enable_target_reset =
5257             le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5258         ha->flags.enable_led_scheme = 0;
5259         ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5260
5261         ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5262             (BIT_6 | BIT_5 | BIT_4)) >> 4;
5263
5264         memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
5265             sizeof(ha->fw_seriallink_options24));
5266
5267         /* save HBA serial number */
5268         ha->serial0 = icb->port_name[5];
5269         ha->serial1 = icb->port_name[6];
5270         ha->serial2 = icb->port_name[7];
5271         memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5272         memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5273
5274         icb->execution_throttle = cpu_to_le16(0xFFFF);
5275
5276         ha->retry_count = le16_to_cpu(nv->login_retry_count);
5277
5278         /* Set minimum login_timeout to 4 seconds. */
5279         if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5280                 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5281         if (le16_to_cpu(nv->login_timeout) < 4)
5282                 nv->login_timeout = cpu_to_le16(4);
5283         ha->login_timeout = le16_to_cpu(nv->login_timeout);
5284
5285         /* Set minimum RATOV to 100 tenths of a second. */
5286         ha->r_a_tov = 100;
5287
5288         ha->loop_reset_delay = nv->reset_delay;
5289
5290         /* Link Down Timeout = 0:
5291          *
5292          *      When Port Down timer expires we will start returning
5293          *      I/O's to OS with "DID_NO_CONNECT".
5294          *
5295          * Link Down Timeout != 0:
5296          *
5297          *       The driver waits for the link to come up after link down
5298          *       before returning I/Os to OS with "DID_NO_CONNECT".
5299          */
5300         if (le16_to_cpu(nv->link_down_timeout) == 0) {
5301                 ha->loop_down_abort_time =
5302                     (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5303         } else {
5304                 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5305                 ha->loop_down_abort_time =
5306                     (LOOP_DOWN_TIME - ha->link_down_timeout);
5307         }
5308
5309         /* Need enough time to try and get the port back. */
5310         ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5311         if (qlport_down_retry)
5312                 ha->port_down_retry_count = qlport_down_retry;
5313
5314         /* Set login_retry_count */
5315         ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
5316         if (ha->port_down_retry_count ==
5317             le16_to_cpu(nv->port_down_retry_count) &&
5318             ha->port_down_retry_count > 3)
5319                 ha->login_retry_count = ha->port_down_retry_count;
5320         else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5321                 ha->login_retry_count = ha->port_down_retry_count;
5322         if (ql2xloginretrycount)
5323                 ha->login_retry_count = ql2xloginretrycount;
5324
5325         /* Enable ZIO. */
5326         if (!vha->flags.init_done) {
5327                 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5328                     (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5329                 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5330                     le16_to_cpu(icb->interrupt_delay_timer): 2;
5331         }
5332         icb->firmware_options_2 &= cpu_to_le32(
5333             ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5334         vha->flags.process_response_queue = 0;
5335         if (ha->zio_mode != QLA_ZIO_DISABLED) {
5336                 ha->zio_mode = QLA_ZIO_MODE_6;
5337
5338                 ql_log(ql_log_info, vha, 0x006f,
5339                     "ZIO mode %d enabled; timer delay (%d us).\n",
5340                     ha->zio_mode, ha->zio_timer * 100);
5341
5342                 icb->firmware_options_2 |= cpu_to_le32(
5343                     (uint32_t)ha->zio_mode);
5344                 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5345                 vha->flags.process_response_queue = 1;
5346         }
5347
5348         if (rval) {
5349                 ql_log(ql_log_warn, vha, 0x0070,
5350                     "NVRAM configuration failed.\n");
5351         }
5352         return (rval);
5353 }
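
/*
 * Worked example of the NVRAM checksum rule used above: the 32-bit
 * little-endian words of a valid region sum to zero modulo 2^32, so the
 * stored checksum word is the two's complement of the sum of the remaining
 * words.  If those words sum to 0x1234, the checksum word holds 0xffffedcc
 * and the chksum accumulator above ends at 0.
 */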
5354
5355 uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
5356 {
5357         struct qla27xx_image_status pri_image_status, sec_image_status;
5358         uint8_t valid_pri_image, valid_sec_image;
5359         uint32_t *wptr;
5360         uint32_t cnt, chksum, size;
5361         struct qla_hw_data *ha = vha->hw;
5362
5363         valid_pri_image = valid_sec_image = 1;
5364         ha->active_image = 0;
5365         size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
5366
5367         if (!ha->flt_region_img_status_pri) {
5368                 valid_pri_image = 0;
5369                 goto check_sec_image;
5370         }
5371
5372         qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
5373             ha->flt_region_img_status_pri, size);
5374
5375         if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
5376                 ql_dbg(ql_dbg_init, vha, 0x018b,
5377                     "Primary image signature (0x%x) not valid\n",
5378                     pri_image_status.signature);
5379                 valid_pri_image = 0;
5380                 goto check_sec_image;
5381         }
5382
5383         wptr = (uint32_t *)(&pri_image_status);
5384         cnt = size;
5385
5386         for (chksum = 0; cnt--; wptr++)
5387                 chksum += le32_to_cpu(*wptr);
5388         if (chksum) {
5389                 ql_dbg(ql_dbg_init, vha, 0x018c,
5390                     "Checksum validation failed for primary image (0x%x)\n",
5391                     chksum);
5392                 valid_pri_image = 0;
5393         }
5394
5395 check_sec_image:
5396         if (!ha->flt_region_img_status_sec) {
5397                 valid_sec_image = 0;
5398                 goto check_valid_image;
5399         }
5400
5401         qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
5402             ha->flt_region_img_status_sec, size);
5403
5404         if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
5405                 ql_dbg(ql_dbg_init, vha, 0x018d,
5406                     "Secondary image signature(0x%x) not valid\n",
5407                     sec_image_status.signature);
5408                 valid_sec_image = 0;
5409                 goto check_valid_image;
5410         }
5411
5412         wptr = (uint32_t *)(&sec_image_status);
5413         cnt = size;
5414         for (chksum = 0; cnt--; wptr++)
5415                 chksum += le32_to_cpu(*wptr);
5416         if (chksum) {
5417                 ql_dbg(ql_dbg_init, vha, 0x018e,
5418                     "Checksum validation failed for secondary image (0x%x)\n",
5419                     chksum);
5420                 valid_sec_image = 0;
5421         }
5422
5423 check_valid_image:
5424         if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
5425                 ha->active_image = QLA27XX_PRIMARY_IMAGE;
5426         if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
5427                 if (!ha->active_image ||
5428                     pri_image_status.generation_number <
5429                     sec_image_status.generation_number)
5430                         ha->active_image = QLA27XX_SECONDARY_IMAGE;
5431         }
5432
5433         ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
5434             ha->active_image == 0 ? "default bootld and fw" :
5435             ha->active_image == 1 ? "primary" :
5436             ha->active_image == 2 ? "secondary" :
5437             "Invalid");
5438
5439         return ha->active_image;
5440 }
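
/*
 * Worked example of the selection above (illustrative numbers): both
 * image-status regions valid with bit 0 of image_status_mask set, primary
 * generation_number == 5, secondary == 7.  The primary is picked first, then
 * replaced because 5 < 7, so ha->active_image ends up as
 * QLA27XX_SECONDARY_IMAGE.
 */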
5441
5442 static int
5443 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
5444     uint32_t faddr)
5445 {
5446         int     rval = QLA_SUCCESS;
5447         int     segments, fragment;
5448         uint32_t *dcode, dlen;
5449         uint32_t risc_addr;
5450         uint32_t risc_size;
5451         uint32_t i;
5452         struct qla_hw_data *ha = vha->hw;
5453         struct req_que *req = ha->req_q_map[0];
5454
5455         ql_dbg(ql_dbg_init, vha, 0x008b,
5456             "FW: Loading firmware from flash (%x).\n", faddr);
5457
5458         rval = QLA_SUCCESS;
5459
5460         segments = FA_RISC_CODE_SEGMENTS;
5461         dcode = (uint32_t *)req->ring;
5462         *srisc_addr = 0;
5463
5464         if (IS_QLA27XX(ha) &&
5465             qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
5466                 faddr = ha->flt_region_fw_sec;
5467
5468         /* Validate firmware image by checking version. */
5469         qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
5470         for (i = 0; i < 4; i++)
5471                 dcode[i] = be32_to_cpu(dcode[i]);
5472         if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5473             dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5474             (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5475                 dcode[3] == 0)) {
5476                 ql_log(ql_log_fatal, vha, 0x008c,
5477                     "Unable to verify the integrity of flash firmware "
5478                     "image.\n");
5479                 ql_log(ql_log_fatal, vha, 0x008d,
5480                     "Firmware data: %08x %08x %08x %08x.\n",
5481                     dcode[0], dcode[1], dcode[2], dcode[3]);
5482
5483                 return QLA_FUNCTION_FAILED;
5484         }
5485
5486         while (segments && rval == QLA_SUCCESS) {
5487                 /* Read segment's load information. */
5488                 qla24xx_read_flash_data(vha, dcode, faddr, 4);
5489
5490                 risc_addr = be32_to_cpu(dcode[2]);
5491                 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5492                 risc_size = be32_to_cpu(dcode[3]);
5493
5494                 fragment = 0;
5495                 while (risc_size > 0 && rval == QLA_SUCCESS) {
5496                         dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5497                         if (dlen > risc_size)
5498                                 dlen = risc_size;
5499
5500                         ql_dbg(ql_dbg_init, vha, 0x008e,
5501                             "Loading risc segment@ risc addr %x "
5502                             "number of dwords 0x%x offset 0x%x.\n",
5503                             risc_addr, dlen, faddr);
5504
5505                         qla24xx_read_flash_data(vha, dcode, faddr, dlen);
5506                         for (i = 0; i < dlen; i++)
5507                                 dcode[i] = swab32(dcode[i]);
5508
5509                         rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5510                             dlen);
5511                         if (rval) {
5512                                 ql_log(ql_log_fatal, vha, 0x008f,
5513                                     "Failed to load segment %d of firmware.\n",
5514                                     fragment);
5515                                 return QLA_FUNCTION_FAILED;
5516                         }
5517
5518                         faddr += dlen;
5519                         risc_addr += dlen;
5520                         risc_size -= dlen;
5521                         fragment++;
5522                 }
5523
5524                 /* Next segment. */
5525                 segments--;
5526         }
5527
5528         if (!IS_QLA27XX(ha))
5529                 return rval;
5530
5531         vfree(ha->fw_dump_template);
5533         ha->fw_dump_template = NULL;
5534         ha->fw_dump_template_len = 0;
5535
5536         ql_dbg(ql_dbg_init, vha, 0x0161,
5537             "Loading fwdump template from %x\n", faddr);
5538         qla24xx_read_flash_data(vha, dcode, faddr, 7);
5539         risc_size = be32_to_cpu(dcode[2]);
5540         ql_dbg(ql_dbg_init, vha, 0x0162,
5541             "-> array size %x dwords\n", risc_size);
5542         if (risc_size == 0 || risc_size == ~0)
5543                 goto default_template;
5544
5545         dlen = (risc_size - 8) * sizeof(*dcode);
5546         ql_dbg(ql_dbg_init, vha, 0x0163,
5547             "-> template allocating %x bytes...\n", dlen);
5548         ha->fw_dump_template = vmalloc(dlen);
5549         if (!ha->fw_dump_template) {
5550                 ql_log(ql_log_warn, vha, 0x0164,
5551                     "Failed fwdump template allocate %x bytes.\n", dlen);
5552                 goto default_template;
5553         }
5554
5555         faddr += 7;
5556         risc_size -= 8;
5557         dcode = ha->fw_dump_template;
5558         qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
5559         for (i = 0; i < risc_size; i++)
5560                 dcode[i] = le32_to_cpu(dcode[i]);
5561
5562         if (!qla27xx_fwdt_template_valid(dcode)) {
5563                 ql_log(ql_log_warn, vha, 0x0165,
5564                     "Failed fwdump template validate\n");
5565                 goto default_template;
5566         }
5567
5568         dlen = qla27xx_fwdt_template_size(dcode);
5569         ql_dbg(ql_dbg_init, vha, 0x0166,
5570             "-> template size %x bytes\n", dlen);
5571         if (dlen > risc_size * sizeof(*dcode)) {
5572                 ql_log(ql_log_warn, vha, 0x0167,
5573                     "Failed fwdump template exceeds array by %x bytes\n",
5574                     (uint32_t)(dlen - risc_size * sizeof(*dcode)));
5575                 goto default_template;
5576         }
5577         ha->fw_dump_template_len = dlen;
5578         return rval;
5579
5580 default_template:
5581         ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
5582         vfree(ha->fw_dump_template);
5584         ha->fw_dump_template = NULL;
5585         ha->fw_dump_template_len = 0;
5586
5587         dlen = qla27xx_fwdt_template_default_size();
5588         ql_dbg(ql_dbg_init, vha, 0x0169,
5589             "-> template allocating %x bytes...\n", dlen);
5590         ha->fw_dump_template = vmalloc(dlen);
5591         if (!ha->fw_dump_template) {
5592                 ql_log(ql_log_warn, vha, 0x016a,
5593                     "Failed fwdump template allocate %x bytes.\n", dlen);
5594                 goto failed_template;
5595         }
5596
5597         dcode = ha->fw_dump_template;
5598         risc_size = dlen / sizeof(*dcode);
5599         memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
5600         for (i = 0; i < risc_size; i++)
5601                 dcode[i] = be32_to_cpu(dcode[i]);
5602
5603         if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5604                 ql_log(ql_log_warn, vha, 0x016b,
5605                     "Failed fwdump template validate\n");
5606                 goto failed_template;
5607         }
5608
5609         dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5610         ql_dbg(ql_dbg_init, vha, 0x016c,
5611             "-> template size %x bytes\n", dlen);
5612         ha->fw_dump_template_len = dlen;
5613         return rval;
5614
5615 failed_template:
5616         ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
5617         vfree(ha->fw_dump_template);
5619         ha->fw_dump_template = NULL;
5620         ha->fw_dump_template_len = 0;
5621         return rval;
5622 }
5623
5624 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
5625
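/*
 * qla2x00_load_risc
 *      Loads RISC firmware for the older ISP2x00-series adapters from a
 *      firmware blob obtained via qla2x00_request_firmware(), transferring
 *      the image one 16-bit word fragment at a time through the
 *      request-queue DMA buffer after a basic sanity check of the header.
 *
 * Return:
 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *      Kernel context.
 */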
5626 int
5627 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5628 {
5629         int     rval;
5630         int     i, fragment;
5631         uint16_t *wcode, *fwcode;
5632         uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
5633         struct fw_blob *blob;
5634         struct qla_hw_data *ha = vha->hw;
5635         struct req_que *req = ha->req_q_map[0];
5636
5637         /* Load firmware blob. */
5638         blob = qla2x00_request_firmware(vha);
5639         if (!blob) {
5640                 ql_log(ql_log_info, vha, 0x0083,
5641                     "Firmware image unavailable.\n");
5642                 ql_log(ql_log_info, vha, 0x0084,
5643                     "Firmware images can be retrieved from: " QLA_FW_URL ".\n");
5644                 return QLA_FUNCTION_FAILED;
5645         }
5646
5647         rval = QLA_SUCCESS;
5648
5649         wcode = (uint16_t *)req->ring;
5650         *srisc_addr = 0;
5651         fwcode = (uint16_t *)blob->fw->data;
5652         fwclen = 0;
5653
5654         /* Validate firmware image by checking version. */
5655         if (blob->fw->size < 8 * sizeof(uint16_t)) {
5656                 ql_log(ql_log_fatal, vha, 0x0085,
5657                     "Unable to verify integrity of firmware image (%zd).\n",
5658                     blob->fw->size);
5659                 goto fail_fw_integrity;
5660         }
5661         for (i = 0; i < 4; i++)
5662                 wcode[i] = be16_to_cpu(fwcode[i + 4]);
5663         if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
5664             wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
5665                 wcode[2] == 0 && wcode[3] == 0)) {
5666                 ql_log(ql_log_fatal, vha, 0x0086,
5667                     "Unable to verify integrity of firmware image.\n");
5668                 ql_log(ql_log_fatal, vha, 0x0087,
5669                     "Firmware data: %04x %04x %04x %04x.\n",
5670                     wcode[0], wcode[1], wcode[2], wcode[3]);
5671                 goto fail_fw_integrity;
5672         }
5673
5674         seg = blob->segs;
5675         while (*seg && rval == QLA_SUCCESS) {
5676                 risc_addr = *seg;
5677                 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
5678                 risc_size = be16_to_cpu(fwcode[3]);
5679
5680                 /* Validate firmware image size. */
5681                 fwclen += risc_size * sizeof(uint16_t);
5682                 if (blob->fw->size < fwclen) {
5683                         ql_log(ql_log_fatal, vha, 0x0088,
5684                             "Unable to verify integrity of firmware image "
5685                             "(%zd).\n", blob->fw->size);
5686                         goto fail_fw_integrity;
5687                 }
5688
5689                 fragment = 0;
5690                 while (risc_size > 0 && rval == QLA_SUCCESS) {
5691                         wlen = (uint16_t)(ha->fw_transfer_size >> 1);
5692                         if (wlen > risc_size)
5693                                 wlen = risc_size;
5694                         ql_dbg(ql_dbg_init, vha, 0x0089,
5695                             "Loading risc segment at risc addr %x number of "
5696                             "words 0x%x.\n", risc_addr, wlen);
5697
5698                         for (i = 0; i < wlen; i++)
5699                                 wcode[i] = swab16(fwcode[i]);
5700
5701                         rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5702                             wlen);
5703                         if (rval) {
5704                                 ql_log(ql_log_fatal, vha, 0x008a,
5705                                     "Failed to load segment %d of firmware.\n",
5706                                     fragment);
5707                                 break;
5708                         }
5709
5710                         fwcode += wlen;
5711                         risc_addr += wlen;
5712                         risc_size -= wlen;
5713                         fragment++;
5714                 }
5715
5716                 /* Next segment. */
5717                 seg++;
5718         }
5719         return rval;
5720
5721 fail_fw_integrity:
5722         return QLA_FUNCTION_FAILED;
5723 }
5724
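/*
 * qla24xx_load_risc_blob
 *      Loads ISP24xx-and-later RISC firmware from a firmware blob obtained
 *      via qla2x00_request_firmware() (.bin file), one 32-bit dword
 *      fragment at a time.  On ISP27xx the firmware-dump template appended
 *      to the image is loaded as well, with a fallback to the built-in
 *      default template.
 *
 * Return:
 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *      Kernel context.
 */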
5725 static int
5726 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5727 {
5728         int     rval;
5729         int     segments, fragment;
5730         uint32_t *dcode, dlen;
5731         uint32_t risc_addr;
5732         uint32_t risc_size;
5733         uint32_t i;
5734         struct fw_blob *blob;
5735         const uint32_t *fwcode;
5736         uint32_t fwclen;
5737         struct qla_hw_data *ha = vha->hw;
5738         struct req_que *req = ha->req_q_map[0];
5739
5740         /* Load firmware blob. */
5741         blob = qla2x00_request_firmware(vha);
5742         if (!blob) {
5743                 ql_log(ql_log_warn, vha, 0x0090,
5744                     "Firmware image unavailable.\n");
5745                 ql_log(ql_log_warn, vha, 0x0091,
5746                     "Firmware images can be retrieved from: "
5747                     QLA_FW_URL ".\n");
5748
5749                 return QLA_FUNCTION_FAILED;
5750         }
5751
5752         ql_dbg(ql_dbg_init, vha, 0x0092,
5753             "FW: Loading via request-firmware.\n");
5754
5755         rval = QLA_SUCCESS;
5756
5757         segments = FA_RISC_CODE_SEGMENTS;
5758         dcode = (uint32_t *)req->ring;
5759         *srisc_addr = 0;
5760         fwcode = (uint32_t *)blob->fw->data;
5761         fwclen = 0;
5762
5763         /* Validate firmware image by checking version. */
5764         if (blob->fw->size < 8 * sizeof(uint32_t)) {
5765                 ql_log(ql_log_fatal, vha, 0x0093,
5766                     "Unable to verify integrity of firmware image (%zd).\n",
5767                     blob->fw->size);
5768                 return QLA_FUNCTION_FAILED;
5769         }
5770         for (i = 0; i < 4; i++)
5771                 dcode[i] = be32_to_cpu(fwcode[i + 4]);
5772         if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5773             dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5774             (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5775                 dcode[3] == 0)) {
5776                 ql_log(ql_log_fatal, vha, 0x0094,
5777                     "Unable to verify integrity of firmware image (%zd).\n",
5778                     blob->fw->size);
5779                 ql_log(ql_log_fatal, vha, 0x0095,
5780                     "Firmware data: %08x %08x %08x %08x.\n",
5781                     dcode[0], dcode[1], dcode[2], dcode[3]);
5782                 return QLA_FUNCTION_FAILED;
5783         }
5784
5785         while (segments && rval == QLA_SUCCESS) {
5786                 risc_addr = be32_to_cpu(fwcode[2]);
5787                 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5788                 risc_size = be32_to_cpu(fwcode[3]);
5789
5790                 /* Validate firmware image size. */
5791                 fwclen += risc_size * sizeof(uint32_t);
5792                 if (blob->fw->size < fwclen) {
5793                         ql_log(ql_log_fatal, vha, 0x0096,
5794                             "Unable to verify integrity of firmware image "
5795                             "(%zd).\n", blob->fw->size);
5796                         return QLA_FUNCTION_FAILED;
5797                 }
5798
5799                 fragment = 0;
5800                 while (risc_size > 0 && rval == QLA_SUCCESS) {
5801                         dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5802                         if (dlen > risc_size)
5803                                 dlen = risc_size;
5804
5805                         ql_dbg(ql_dbg_init, vha, 0x0097,
5806                             "Loading risc segment at risc addr %x "
5807                             "number of dwords 0x%x.\n", risc_addr, dlen);
5808
5809                         for (i = 0; i < dlen; i++)
5810                                 dcode[i] = swab32(fwcode[i]);
5811
5812                         rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5813                             dlen);
5814                         if (rval) {
5815                                 ql_log(ql_log_fatal, vha, 0x0098,
5816                                     "Failed to load segment %d of firmware.\n",
5817                                     fragment);
5818                                 return QLA_FUNCTION_FAILED;
5819                         }
5820
5821                         fwcode += dlen;
5822                         risc_addr += dlen;
5823                         risc_size -= dlen;
5824                         fragment++;
5825                 }
5826
5827                 /* Next segment. */
5828                 segments--;
5829         }
5830
5831         if (!IS_QLA27XX(ha))
5832                 return rval;
5833
5834         vfree(ha->fw_dump_template);
5836         ha->fw_dump_template = NULL;
5837         ha->fw_dump_template_len = 0;
5838
5839         ql_dbg(ql_dbg_init, vha, 0x0171,
5840             "Loading fwdump template from %x\n",
5841             (uint32_t)((void *)fwcode - (void *)blob->fw->data));
5842         risc_size = be32_to_cpu(fwcode[2]);
5843         ql_dbg(ql_dbg_init, vha, 0x0172,
5844             "-> array size %x dwords\n", risc_size);
5845         if (risc_size == 0 || risc_size == ~0)
5846                 goto default_template;
5847
5848         dlen = (risc_size - 8) * sizeof(*fwcode);
5849         ql_dbg(ql_dbg_init, vha, 0x0173,
5850             "-> template allocating %x bytes...\n", dlen);
5851         ha->fw_dump_template = vmalloc(dlen);
5852         if (!ha->fw_dump_template) {
5853                 ql_log(ql_log_warn, vha, 0x0174,
5854                     "Failed fwdump template allocate %x bytes.\n", dlen);
5855                 goto default_template;
5856         }
5857
5858         fwcode += 7;
5859         risc_size -= 8;
5860         dcode = ha->fw_dump_template;
5861         for (i = 0; i < risc_size; i++)
5862                 dcode[i] = le32_to_cpu(fwcode[i]);
5863
5864         if (!qla27xx_fwdt_template_valid(dcode)) {
5865                 ql_log(ql_log_warn, vha, 0x0175,
5866                     "Failed fwdump template validate\n");
5867                 goto default_template;
5868         }
5869
5870         dlen = qla27xx_fwdt_template_size(dcode);
5871         ql_dbg(ql_dbg_init, vha, 0x0176,
5872             "-> template size %x bytes\n", dlen);
5873         if (dlen > risc_size * sizeof(*fwcode)) {
5874                 ql_log(ql_log_warn, vha, 0x0177,
5875                     "Failed fwdump template exceeds array by %x bytes\n",
5876                     (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
5877                 goto default_template;
5878         }
5879         ha->fw_dump_template_len = dlen;
5880         return rval;
5881
5882 default_template:
5883         ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
5884         vfree(ha->fw_dump_template);
5886         ha->fw_dump_template = NULL;
5887         ha->fw_dump_template_len = 0;
5888
5889         dlen = qla27xx_fwdt_template_default_size();
5890         ql_dbg(ql_dbg_init, vha, 0x0179,
5891             "-> template allocating %x bytes...\n", dlen);
5892         ha->fw_dump_template = vmalloc(dlen);
5893         if (!ha->fw_dump_template) {
5894                 ql_log(ql_log_warn, vha, 0x017a,
5895                     "Failed fwdump template allocate %x bytes.\n", dlen);
5896                 goto failed_template;
5897         }
5898
5899         dcode = ha->fw_dump_template;
5900         risc_size = dlen / sizeof(*fwcode);
5901         fwcode = qla27xx_fwdt_template_default();
5902         for (i = 0; i < risc_size; i++)
5903                 dcode[i] = be32_to_cpu(fwcode[i]);
5904
5905         if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5906                 ql_log(ql_log_warn, vha, 0x017b,
5907                     "Failed fwdump template validate\n");
5908                 goto failed_template;
5909         }
5910
5911         dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5912         ql_dbg(ql_dbg_init, vha, 0x017c,
5913             "-> template size %x bytes\n", dlen);
5914         ha->fw_dump_template_len = dlen;
5915         return rval;
5916
5917 failed_template:
5918         ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
5919         vfree(ha->fw_dump_template);
5921         ha->fw_dump_template = NULL;
5922         ha->fw_dump_template_len = 0;
5923         return rval;
5924 }
5925
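/*
 * qla24xx_load_risc
 *      Selects the firmware source for ISP24xx-class adapters: the
 *      request-firmware blob is tried first, then the copy in flash.
 *      When ql2xfwloadbin == 1 the flash-first ordering of
 *      qla81xx_load_risc() is used instead.
 *
 * Return:
 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *      Kernel context.
 */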
5926 int
5927 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5928 {
5929         int rval;
5930
5931         if (ql2xfwloadbin == 1)
5932                 return qla81xx_load_risc(vha, srisc_addr);
5933
5934         /*
5935          * FW Load priority:
5936          * 1) Firmware via request-firmware interface (.bin file).
5937          * 2) Firmware residing in flash.
5938          */
5939         rval = qla24xx_load_risc_blob(vha, srisc_addr);
5940         if (rval == QLA_SUCCESS)
5941                 return rval;
5942
5943         return qla24xx_load_risc_flash(vha, srisc_addr,
5944             vha->hw->flt_region_fw);
5945 }
5946
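/*
 * qla81xx_load_risc
 *      Firmware load for ISP81xx-class adapters: the flash copy is tried
 *      first, then the request-firmware blob (the flash attempt is skipped
 *      when ql2xfwloadbin == 2), and finally the golden firmware image in
 *      flash as a last resort, which leaves the adapter running in a
 *      limited-operation mode.
 *
 * Return:
 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *      Kernel context.
 */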
5947 int
5948 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5949 {
5950         int rval;
5951         struct qla_hw_data *ha = vha->hw;
5952
5953         if (ql2xfwloadbin == 2)
5954                 goto try_blob_fw;
5955
5956         /*
5957          * FW Load priority:
5958          * 1) Firmware residing in flash.
5959          * 2) Firmware via request-firmware interface (.bin file).
5960          * 3) Golden-Firmware residing in flash -- limited operation.
5961          */
5962         rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
5963         if (rval == QLA_SUCCESS)
5964                 return rval;
5965
5966 try_blob_fw:
5967         rval = qla24xx_load_risc_blob(vha, srisc_addr);
5968         if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
5969                 return rval;
5970
5971         ql_log(ql_log_info, vha, 0x0099,
5972             "Attempting to fallback to golden firmware.\n");
5973         rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
5974         if (rval != QLA_SUCCESS)
5975                 return rval;
5976
5977         ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
5978         ha->flags.running_gold_fw = 1;
5979         return rval;
5980 }
5981
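/*
 * qla2x00_try_to_stop_firmware
 *      Issues a stop-firmware command on FWI2-capable adapters that have
 *      firmware loaded, retrying up to five times with a chip reset and
 *      re-setup in between when the command fails outright.
 *
 * Context:
 *      Kernel context.
 */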
5982 void
5983 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
5984 {
5985         int ret, retries;
5986         struct qla_hw_data *ha = vha->hw;
5987
5988         if (ha->flags.pci_channel_io_perm_failure)
5989                 return;
5990         if (!IS_FWI2_CAPABLE(ha))
5991                 return;
5992         if (!ha->fw_major_version)
5993                 return;
5994
5995         ret = qla2x00_stop_firmware(vha);
5996         for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
5997             ret != QLA_INVALID_COMMAND && retries; retries--) {
5998                 ha->isp_ops->reset_chip(vha);
5999                 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
6000                         continue;
6001                 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
6002                         continue;
6003                 ql_log(ql_log_info, vha, 0x8015,
6004                     "Attempting retry of stop-firmware command.\n");
6005                 ret = qla2x00_stop_firmware(vha);
6006         }
6007 }
6008
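/*
 * qla24xx_configure_vhba
 *      Brings an NPIV virtual port online: waits for the physical port's
 *      firmware to become ready, logs the vport into the fabric name
 *      server (SNS) and resynchronizes the loop on the base host.
 *
 * Return:
 *      QLA_SUCCESS on success, -EINVAL if called on the physical
 *      (vp_idx == 0) host, QLA_FUNCTION_FAILED otherwise.
 *
 * Context:
 *      Kernel context.
 */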
6009 int
6010 qla24xx_configure_vhba(scsi_qla_host_t *vha)
6011 {
6012         int rval = QLA_SUCCESS;
6013         int rval2;
6014         uint16_t mb[MAILBOX_REGISTER_COUNT];
6015         struct qla_hw_data *ha = vha->hw;
6016         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6017         struct req_que *req;
6018         struct rsp_que *rsp;
6019
6020         if (!vha->vp_idx)
6021                 return -EINVAL;
6022
6023         rval = qla2x00_fw_ready(base_vha);
6024         if (ha->flags.cpu_affinity_enabled)
6025                 req = ha->req_q_map[0];
6026         else
6027                 req = vha->req;
6028         rsp = req->rsp;
6029
6030         if (rval == QLA_SUCCESS) {
6031                 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6032                 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6033         }
6034
6035         vha->flags.management_server_logged_in = 0;
6036
6037         /* Login to SNS first */
6038         rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
6039             BIT_1);
6040         if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
6041                 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
6042                         ql_dbg(ql_dbg_init, vha, 0x0120,
6043                             "Failed SNS login: loop_id=%x, rval2=%d\n",
6044                             NPH_SNS, rval2);
6045                 else
6046                         ql_dbg(ql_dbg_init, vha, 0x0103,
6047                             "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
6048                             "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
6049                             NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
6050                 return (QLA_FUNCTION_FAILED);
6051         }
6052
6053         atomic_set(&vha->loop_down_timer, 0);
6054         atomic_set(&vha->loop_state, LOOP_UP);
6055         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6056         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6057         rval = qla2x00_loop_resync(base_vha);
6058
6059         return rval;
6060 }
6061
6062 /* 84XX Support **************************************************************/
6063
6064 static LIST_HEAD(qla_cs84xx_list);
6065 static DEFINE_MUTEX(qla_cs84xx_mutex);
6066
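/*
 * qla84xx_get_chip
 *      Returns the CS84xx chip-state object shared by all functions on
 *      this adapter's PCI bus, taking a reference on it; allocates and
 *      registers a new one if none exists yet.  Serialized by
 *      qla_cs84xx_mutex.
 *
 * Context:
 *      Kernel context; may sleep.
 */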
6067 static struct qla_chip_state_84xx *
6068 qla84xx_get_chip(struct scsi_qla_host *vha)
6069 {
6070         struct qla_chip_state_84xx *cs84xx;
6071         struct qla_hw_data *ha = vha->hw;
6072
6073         mutex_lock(&qla_cs84xx_mutex);
6074
6075         /* Find any shared 84xx chip. */
6076         list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
6077                 if (cs84xx->bus == ha->pdev->bus) {
6078                         kref_get(&cs84xx->kref);
6079                         goto done;
6080                 }
6081         }
6082
6083         cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
6084         if (!cs84xx)
6085                 goto done;
6086
6087         kref_init(&cs84xx->kref);
6088         spin_lock_init(&cs84xx->access_lock);
6089         mutex_init(&cs84xx->fw_update_mutex);
6090         cs84xx->bus = ha->pdev->bus;
6091
6092         list_add_tail(&cs84xx->list, &qla_cs84xx_list);
6093 done:
6094         mutex_unlock(&qla_cs84xx_mutex);
6095         return cs84xx;
6096 }
6097
6098 static void
6099 __qla84xx_chip_release(struct kref *kref)
6100 {
6101         struct qla_chip_state_84xx *cs84xx =
6102             container_of(kref, struct qla_chip_state_84xx, kref);
6103
6104         mutex_lock(&qla_cs84xx_mutex);
6105         list_del(&cs84xx->list);
6106         mutex_unlock(&qla_cs84xx_mutex);
6107         kfree(cs84xx);
6108 }
6109
6110 void
6111 qla84xx_put_chip(struct scsi_qla_host *vha)
6112 {
6113         struct qla_hw_data *ha = vha->hw;
6114         if (ha->cs84xx)
6115                 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
6116 }
6117
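/*
 * qla84xx_init_chip
 *      Verifies the CS84xx firmware state under the shared fw_update_mutex.
 *
 * Return:
 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *      Kernel context.
 */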
6118 static int
6119 qla84xx_init_chip(scsi_qla_host_t *vha)
6120 {
6121         int rval;
6122         uint16_t status[2];
6123         struct qla_hw_data *ha = vha->hw;
6124
6125         mutex_lock(&ha->cs84xx->fw_update_mutex);
6126
6127         rval = qla84xx_verify_chip(vha, status);
6128
6129         mutex_unlock(&ha->cs84xx->fw_update_mutex);
6130
6131         return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
6132             QLA_SUCCESS;
6133 }
6134
6135 /* 81XX Support **************************************************************/
6136
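/*
 * qla81xx_nvram_config
 *      Reads the VPD and NVRAM images from flash, validates the NVRAM
 *      checksum and signature (falling back to safe defaults when the data
 *      is corrupt), then builds the ISP81xx initialization control block
 *      and derives the driver operating parameters (timeouts, retry
 *      counts, ZIO mode, etc.) from it.
 *
 * Return:
 *      0 on success, 1 if default NVRAM values had to be used.
 *
 * Context:
 *      Kernel context.
 */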
6137 int
6138 qla81xx_nvram_config(scsi_qla_host_t *vha)
6139 {
6140         int   rval;
6141         struct init_cb_81xx *icb;
6142         struct nvram_81xx *nv;
6143         uint32_t *dptr;
6144         uint8_t  *dptr1, *dptr2;
6145         uint32_t chksum;
6146         uint16_t cnt;
6147         struct qla_hw_data *ha = vha->hw;
6148
6149         rval = QLA_SUCCESS;
6150         icb = (struct init_cb_81xx *)ha->init_cb;
6151         nv = ha->nvram;
6152
6153         /* Determine NVRAM starting address. */
6154         ha->nvram_size = sizeof(struct nvram_81xx);
6155         ha->vpd_size = FA_NVRAM_VPD_SIZE;
6156         if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
6157                 ha->vpd_size = FA_VPD_SIZE_82XX;
6158
6159         /* Get VPD data into cache */
6160         ha->vpd = ha->nvram + VPD_OFFSET;
6161         ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
6162             ha->vpd_size);
6163
6164         /* Get NVRAM data into cache and calculate checksum. */
6165         ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
6166             ha->nvram_size);
6167         dptr = (uint32_t *)nv;
6168         for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6169                 chksum += le32_to_cpu(*dptr);
6170
6171         ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
6172             "Contents of NVRAM:\n");
6173         ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
6174             (uint8_t *)nv, ha->nvram_size);
6175
6176         /* Bad NVRAM data, set default parameters. */
6177         if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6178             || nv->id[3] != ' ' ||
6179             nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
6180                 /* Reset NVRAM data. */
6181                 ql_log(ql_log_info, vha, 0x0073,
6182                     "Inconsistent NVRAM detected: checksum=0x%x id=%c "
6183                     "version=0x%x.\n", chksum, nv->id[0],
6184                     le16_to_cpu(nv->nvram_version));
6185                 ql_log(ql_log_info, vha, 0x0074,
6186                     "Falling back to functioning (yet invalid -- WWPN) "
6187                     "defaults.\n");
6188
6189                 /*
6190                  * Set default initialization control block.
6191                  */
6192                 memset(nv, 0, ha->nvram_size);
6193                 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6194                 nv->version = cpu_to_le16(ICB_VERSION);
6195                 nv->frame_payload_size = 2048;
6196                 nv->execution_throttle = cpu_to_le16(0xFFFF);
6197                 nv->exchange_count = cpu_to_le16(0);
6198                 nv->port_name[0] = 0x21;
6199                 nv->port_name[1] = 0x00 + ha->port_no + 1;
6200                 nv->port_name[2] = 0x00;
6201                 nv->port_name[3] = 0xe0;
6202                 nv->port_name[4] = 0x8b;
6203                 nv->port_name[5] = 0x1c;
6204                 nv->port_name[6] = 0x55;
6205                 nv->port_name[7] = 0x86;
6206                 nv->node_name[0] = 0x20;
6207                 nv->node_name[1] = 0x00;
6208                 nv->node_name[2] = 0x00;
6209                 nv->node_name[3] = 0xe0;
6210                 nv->node_name[4] = 0x8b;
6211                 nv->node_name[5] = 0x1c;
6212                 nv->node_name[6] = 0x55;
6213                 nv->node_name[7] = 0x86;
6214                 nv->login_retry_count = cpu_to_le16(8);
6215                 nv->interrupt_delay_timer = cpu_to_le16(0);
6216                 nv->login_timeout = cpu_to_le16(0);
6217                 nv->firmware_options_1 =
6218                     cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6219                 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6220                 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6221                 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6222                 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6223                 nv->efi_parameters = cpu_to_le32(0);
6224                 nv->reset_delay = 5;
6225                 nv->max_luns_per_target = cpu_to_le16(128);
6226                 nv->port_down_retry_count = cpu_to_le16(30);
6227                 nv->link_down_timeout = cpu_to_le16(180);
6228                 nv->enode_mac[0] = 0x00;
6229                 nv->enode_mac[1] = 0xC0;
6230                 nv->enode_mac[2] = 0xDD;
6231                 nv->enode_mac[3] = 0x04;
6232                 nv->enode_mac[4] = 0x05;
6233                 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
6234
6235                 rval = 1;
6236         }
6237
6238         if (IS_T10_PI_CAPABLE(ha))
6239                 nv->frame_payload_size &= ~7;
6240
6241         qlt_81xx_config_nvram_stage1(vha, nv);
6242
6243         /* Reset Initialization control block */
6244         memset(icb, 0, ha->init_cb_size);
6245
6246         /* Copy 1st segment. */
6247         dptr1 = (uint8_t *)icb;
6248         dptr2 = (uint8_t *)&nv->version;
6249         cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6250         while (cnt--)
6251                 *dptr1++ = *dptr2++;
6252
6253         icb->login_retry_count = nv->login_retry_count;
6254
6255         /* Copy 2nd segment. */
6256         dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6257         dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6258         cnt = (uint8_t *)&icb->reserved_5 -
6259             (uint8_t *)&icb->interrupt_delay_timer;
6260         while (cnt--)
6261                 *dptr1++ = *dptr2++;
6262
6263         memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
6264         /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
6265         if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
6266                 icb->enode_mac[0] = 0x00;
6267                 icb->enode_mac[1] = 0xC0;
6268                 icb->enode_mac[2] = 0xDD;
6269                 icb->enode_mac[3] = 0x04;
6270                 icb->enode_mac[4] = 0x05;
6271                 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
6272         }
6273
6274         /* Use extended-initialization control block. */
6275         memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
6276
6277         /*
6278          * Setup driver NVRAM options.
6279          */
6280         qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
6281             "QLE8XXX");
6282
6283         qlt_81xx_config_nvram_stage2(vha, icb);
6284
6285         /* Use alternate WWN? */
6286         if (nv->host_p & cpu_to_le32(BIT_15)) {
6287                 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6288                 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6289         }
6290
6291         /* Prepare nodename */
6292         if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
6293                 /*
6294                  * Firmware will apply the following mask if the nodename was
6295                  * not provided.
6296                  */
6297                 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6298                 icb->node_name[0] &= 0xF0;
6299         }
6300
6301         /* Set host adapter parameters. */
6302         ha->flags.disable_risc_code_load = 0;
6303         ha->flags.enable_lip_reset = 0;
6304         ha->flags.enable_lip_full_login =
6305             le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
6306         ha->flags.enable_target_reset =
6307             le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
6308         ha->flags.enable_led_scheme = 0;
6309         ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
6310
6311         ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6312             (BIT_6 | BIT_5 | BIT_4)) >> 4;
6313
6314         /* save HBA serial number */
6315         ha->serial0 = icb->port_name[5];
6316         ha->serial1 = icb->port_name[6];
6317         ha->serial2 = icb->port_name[7];
6318         memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6319         memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6320
6321         icb->execution_throttle = cpu_to_le16(0xFFFF);
6322
6323         ha->retry_count = le16_to_cpu(nv->login_retry_count);
6324
6325         /* Set minimum login_timeout to 4 seconds. */
6326         if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6327                 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6328         if (le16_to_cpu(nv->login_timeout) < 4)
6329                 nv->login_timeout = cpu_to_le16(4);
6330         ha->login_timeout = le16_to_cpu(nv->login_timeout);
6331
6332         /* Set minimum RATOV to 100 tenths of a second. */
6333         ha->r_a_tov = 100;
6334
6335         ha->loop_reset_delay = nv->reset_delay;
6336
6337         /* Link Down Timeout = 0:
6338          *
6339          *      When Port Down timer expires we will start returning
6340          *      I/O's to OS with "DID_NO_CONNECT".
6341          *
6342          * Link Down Timeout != 0:
6343          *
6344          *       The driver waits for the link to come up after link down
6345          *       before returning I/Os to OS with "DID_NO_CONNECT".
6346          */
6347         if (le16_to_cpu(nv->link_down_timeout) == 0) {
6348                 ha->loop_down_abort_time =
6349                     (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6350         } else {
6351                 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6352                 ha->loop_down_abort_time =
6353                     (LOOP_DOWN_TIME - ha->link_down_timeout);
6354         }
6355
6356         /* Need enough time to try and get the port back. */
6357         ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6358         if (qlport_down_retry)
6359                 ha->port_down_retry_count = qlport_down_retry;
6360
6361         /* Set login_retry_count */
6362         ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
6363         if (ha->port_down_retry_count ==
6364             le16_to_cpu(nv->port_down_retry_count) &&
6365             ha->port_down_retry_count > 3)
6366                 ha->login_retry_count = ha->port_down_retry_count;
6367         else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6368                 ha->login_retry_count = ha->port_down_retry_count;
6369         if (ql2xloginretrycount)
6370                 ha->login_retry_count = ql2xloginretrycount;
6371
6372         /* if not running MSI-X we need handshaking on interrupts */
6373         if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
6374                 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6375
6376         /* Enable ZIO. */
6377         if (!vha->flags.init_done) {
6378                 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6379                     (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6380                 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6381                     le16_to_cpu(icb->interrupt_delay_timer) : 2;
6382         }
6383         icb->firmware_options_2 &= cpu_to_le32(
6384             ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6385         vha->flags.process_response_queue = 0;
6386         if (ha->zio_mode != QLA_ZIO_DISABLED) {
6387                 ha->zio_mode = QLA_ZIO_MODE_6;
6388
6389                 ql_log(ql_log_info, vha, 0x0075,
6390                     "ZIO mode %d enabled; timer delay (%d us).\n",
6391                     ha->zio_mode,
6392                     ha->zio_timer * 100);
6393
6394                 icb->firmware_options_2 |= cpu_to_le32(
6395                     (uint32_t)ha->zio_mode);
6396                 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
6397                 vha->flags.process_response_queue = 1;
6398         }
6399
6400         if (rval) {
6401                 ql_log(ql_log_warn, vha, 0x0076,
6402                     "NVRAM configuration failed.\n");
6403         }
6404         return (rval);
6405 }
6406
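/*
 * qla82xx_restart_isp
 *      Restarts an ISP82xx after a reset: re-initializes the rings, waits
 *      for firmware readiness, re-enables interrupts and the FCE/EFT trace
 *      buffers, and then runs an ISP abort on every virtual port so that
 *      they recover as well.
 *
 * Return:
 *      0 on success, non-zero on failure.
 *
 * Context:
 *      Kernel context.
 */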
6407 int
6408 qla82xx_restart_isp(scsi_qla_host_t *vha)
6409 {
6410         int status, rval;
6411         struct qla_hw_data *ha = vha->hw;
6412         struct req_que *req = ha->req_q_map[0];
6413         struct rsp_que *rsp = ha->rsp_q_map[0];
6414         struct scsi_qla_host *vp;
6415         unsigned long flags;
6416
6417         status = qla2x00_init_rings(vha);
6418         if (!status) {
6419                 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6420                 ha->flags.chip_reset_done = 1;
6421
6422                 status = qla2x00_fw_ready(vha);
6423                 if (!status) {
6424                         /* Issue a marker after FW becomes ready. */
6425                         qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6426                         vha->flags.online = 1;
6427                         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6428                 }
6429
6430                 /* if no cable then assume it's good */
6431                 if ((vha->device_flags & DFLG_NO_CABLE))
6432                         status = 0;
6433         }
6434
6435         if (!status) {
6436                 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6437
6438                 if (!atomic_read(&vha->loop_down_timer)) {
6439                         /*
6440                          * Issue marker command only when we are going
6441                          * to start the I/O .
6442                          * to start the I/O.
6443                         vha->marker_needed = 1;
6444                 }
6445
6446                 ha->isp_ops->enable_intrs(ha);
6447
6448                 ha->isp_abort_cnt = 0;
6449                 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6450
6451                 /* Update the firmware version */
6452                 status = qla82xx_check_md_needed(vha);
6453
6454                 if (ha->fce) {
6455                         ha->flags.fce_enabled = 1;
6456                         memset(ha->fce, 0,
6457                             fce_calc_size(ha->fce_bufs));
6458                         rval = qla2x00_enable_fce_trace(vha,
6459                             ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6460                             &ha->fce_bufs);
6461                         if (rval) {
6462                                 ql_log(ql_log_warn, vha, 0x8001,
6463                                     "Unable to reinitialize FCE (%d).\n",
6464                                     rval);
6465                                 ha->flags.fce_enabled = 0;
6466                         }
6467                 }
6468
6469                 if (ha->eft) {
6470                         memset(ha->eft, 0, EFT_SIZE);
6471                         rval = qla2x00_enable_eft_trace(vha,
6472                             ha->eft_dma, EFT_NUM_BUFFERS);
6473                         if (rval) {
6474                                 ql_log(ql_log_warn, vha, 0x8010,
6475                                     "Unable to reinitialize EFT (%d).\n",
6476                                     rval);
6477                         }
6478                 }
6479         }
6480
6481         if (!status) {
6482                 ql_dbg(ql_dbg_taskm, vha, 0x8011,
6483                     "qla82xx_restart_isp succeeded.\n");
6484
6485                 spin_lock_irqsave(&ha->vport_slock, flags);
6486                 list_for_each_entry(vp, &ha->vp_list, list) {
6487                         if (vp->vp_idx) {
6488                                 atomic_inc(&vp->vref_count);
6489                                 spin_unlock_irqrestore(&ha->vport_slock, flags);
6490
6491                                 qla2x00_vp_abort_isp(vp);
6492
6493                                 spin_lock_irqsave(&ha->vport_slock, flags);
6494                                 atomic_dec(&vp->vref_count);
6495                         }
6496                 }
6497                 spin_unlock_irqrestore(&ha->vport_slock, flags);
6498
6499         } else {
6500                 ql_log(ql_log_warn, vha, 0x8016,
6501                     "qla82xx_restart_isp **** FAILED ****.\n");
6502         }
6503
6504         return status;
6505 }
6506
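/*
 * qla81xx_update_fw_options
 *      Applies module-parameter driven firmware options for ISP81xx-class
 *      adapters: holds status IOCBs until the ABTS response is received
 *      when ql2xfwholdabts is set, and enables ETS bursting when
 *      ql2xetsenable is set (note that the ETS path clears the other
 *      firmware options before setting BIT_9).
 *
 * Context:
 *      Kernel context.
 */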
6507 void
6508 qla81xx_update_fw_options(scsi_qla_host_t *vha)
6509 {
6510         struct qla_hw_data *ha = vha->hw;
6511
6512         /*  Hold status IOCBs until ABTS response received. */
6513         if (ql2xfwholdabts)
6514                 ha->fw_options[3] |= BIT_12;
6515
6516         if (!ql2xetsenable)
6517                 goto out;
6518
6519         /* Enable ETS Burst. */
6520         memset(ha->fw_options, 0, sizeof(ha->fw_options));
6521         ha->fw_options[2] |= BIT_9;
6522 out:
6523         qla2x00_set_fw_options(vha, ha->fw_options);
6524 }
6525
6526 /*
6527  * qla24xx_get_fcp_prio
6528  *      Gets the fcp cmd priority value for the logged in port.
6529  *      Looks for a match of the port descriptors within
6530  *      each of the fcp prio config entries. If a match is found,
6531  *      the tag (priority) value is returned.
6532  *
6533  * Input:
6534  *      vha = scsi host structure pointer.
6535  *      fcport = port structure pointer.
6536  *
6537  * Return:
6538  *      non-zero (if found)
6539  *      -1 (if not found)
6540  *
6541  * Context:
6542  *      Kernel context
6543  */
6544 static int
6545 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
6546 {
6547         int i, entries;
6548         uint8_t pid_match, wwn_match;
6549         int priority;
6550         uint32_t pid1, pid2;
6551         uint64_t wwn1, wwn2;
6552         struct qla_fcp_prio_entry *pri_entry;
6553         struct qla_hw_data *ha = vha->hw;
6554
6555         if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
6556                 return -1;
6557
6558         priority = -1;
6559         entries = ha->fcp_prio_cfg->num_entries;
6560         pri_entry = &ha->fcp_prio_cfg->entry[0];
6561
6562         for (i = 0; i < entries; i++) {
6563                 pid_match = wwn_match = 0;
6564
6565                 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
6566                         pri_entry++;
6567                         continue;
6568                 }
6569
6570                 /* check source pid for a match */
6571                 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
6572                         pid1 = pri_entry->src_pid & INVALID_PORT_ID;
6573                         pid2 = vha->d_id.b24 & INVALID_PORT_ID;
6574                         if (pid1 == INVALID_PORT_ID)
6575                                 pid_match++;
6576                         else if (pid1 == pid2)
6577                                 pid_match++;
6578                 }
6579
6580                 /* check destination pid for a match */
6581                 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
6582                         pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
6583                         pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
6584                         if (pid1 == INVALID_PORT_ID)
6585                                 pid_match++;
6586                         else if (pid1 == pid2)
6587                                 pid_match++;
6588                 }
6589
6590                 /* check source WWN for a match */
6591                 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
6592                         wwn1 = wwn_to_u64(vha->port_name);
6593                         wwn2 = wwn_to_u64(pri_entry->src_wwpn);
6594                         if (wwn2 == (uint64_t)-1)
6595                                 wwn_match++;
6596                         else if (wwn1 == wwn2)
6597                                 wwn_match++;
6598                 }
6599
6600                 /* check destination WWN for a match */
6601                 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
6602                         wwn1 = wwn_to_u64(fcport->port_name);
6603                         wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
6604                         if (wwn2 == (uint64_t)-1)
6605                                 wwn_match++;
6606                         else if (wwn1 == wwn2)
6607                                 wwn_match++;
6608                 }
6609
6610                 if (pid_match == 2 || wwn_match == 2) {
6611                         /* Found a matching entry */
6612                         if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
6613                                 priority = pri_entry->tag;
6614                         break;
6615                 }
6616
6617                 pri_entry++;
6618         }
6619
6620         return priority;
6621 }
6622
6623 /*
6624  * qla24xx_update_fcport_fcp_prio
6625  *      Activates fcp priority for the logged in fc port
6626  *
6627  * Input:
6628  *      vha = scsi host structure pointer.
6629  *      fcp = port structure pointer.
6630  *
6631  * Return:
6632  *      QLA_SUCCESS or QLA_FUNCTION_FAILED
6633  *
6634  * Context:
6635  *      Kernel context.
6636  */
6637 int
6638 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
6639 {
6640         int ret;
6641         int priority;
6642         uint16_t mb[5];
6643
6644         if (fcport->port_type != FCT_TARGET ||
6645             fcport->loop_id == FC_NO_LOOP_ID)
6646                 return QLA_FUNCTION_FAILED;
6647
6648         priority = qla24xx_get_fcp_prio(vha, fcport);
6649         if (priority < 0)
6650                 return QLA_FUNCTION_FAILED;
6651
6652         if (IS_P3P_TYPE(vha->hw)) {
6653                 fcport->fcp_prio = priority & 0xf;
6654                 return QLA_SUCCESS;
6655         }
6656
6657         ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
6658         if (ret == QLA_SUCCESS) {
6659                 if (fcport->fcp_prio != priority)
6660                         ql_dbg(ql_dbg_user, vha, 0x709e,
6661                             "Updated FCP_CMND priority - value=%d loop_id=%d "
6662                             "port_id=%02x%02x%02x.\n", priority,
6663                             fcport->loop_id, fcport->d_id.b.domain,
6664                             fcport->d_id.b.area, fcport->d_id.b.al_pa);
6665                 fcport->fcp_prio = priority & 0xf;
6666         } else
6667                 ql_dbg(ql_dbg_user, vha, 0x704f,
6668                     "Unable to update FCP_CMND priority - ret=0x%x for "
6669                     "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
6670                     fcport->d_id.b.domain, fcport->d_id.b.area,
6671                     fcport->d_id.b.al_pa);
6672         return  ret;
6673 }
6674
6675 /*
6676  * qla24xx_update_all_fcp_prio
6677  *      Activates fcp priority for all the logged in ports
6678  *
6679  * Input:
6680  *      ha = adapter block pointer.
6681  *
6682  * Return:
6683  *      QLA_SUCCESS or QLA_FUNCTION_FAILED
6684  *
6685  * Context:
6686  *      Kernel context.
6687  */
6688 int
6689 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
6690 {
6691         int ret;
6692         fc_port_t *fcport;
6693
6694         ret = QLA_FUNCTION_FAILED;
6695         /* We need to set priority for all logged in ports */
6696         list_for_each_entry(fcport, &vha->vp_fcports, list)
6697                 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
6698
6699         return ret;
6700 }