drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char*)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
                         sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        phba->sli3_options = 0x0;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }


        /*
         * The value of rr must be 1 since the driver set the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished, or we may have
                 * hit a mailbox error; either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}
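
/*
 * Illustrative sketch (not part of the driver): the poll-mode mailbox
 * pattern used throughout lpfc_config_port_prep(). A command buffer is
 * taken from the HBA's mailbox mempool, the command is built by an
 * lpfc_* helper, issued synchronously with MBX_POLL, and the buffer is
 * returned to the pool. The function name below is hypothetical.
 */
#if 0
static int lpfc_example_poll_mbox(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        lpfc_read_rev(phba, pmb);               /* build READ_REV in pmb */
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        mempool_free(pmb, phba->mbox_mem_pool);

        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif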

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configuring asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                sprintf(phba->OptionROMVersion, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}
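
/*
 * Illustrative sketch (hypothetical, mirrors the two completion handlers
 * above): issuing a mailbox command asynchronously. With MBX_NOWAIT the
 * caller must not free pmb on MBX_BUSY/MBX_SUCCESS; ownership passes to
 * the completion handler, which frees it.
 */
#if 0
static void lpfc_example_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        /* inspect pmboxq->u.mb.mbxStatus here */
        mempool_free(pmboxq, phba->mbox_mem_pool);
}

static int lpfc_example_nowait_mbox(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        lpfc_dump_wakeup_param(phba, pmb);      /* build the command */
        pmb->mbox_cmpl = lpfc_example_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        return 0;
}
#endif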

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname from
 *      cfg_soft_wwnn, cfg_soft_wwpn and the service parameters
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                        sizeof(struct lpfc_name));

        if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                        sizeof(struct lpfc_name));
}
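
/*
 * Illustrative sketch (hypothetical): the u64_to_wwn()/wwn_to_u64()
 * round trip used by lpfc_update_vport_wwn() and the fc_host updates in
 * lpfc_config_port_post(). A WWN is kept both as an 8-byte array
 * (struct lpfc_name) and as a u64 for the FC transport class.
 */
#if 0
static void lpfc_example_wwn(struct lpfc_vport *vport, uint64_t soft_wwpn)
{
        uint64_t wwpn;

        /* overwrite the 8-byte port name from a user-supplied u64 */
        u64_to_wwn(soft_wwpn, vport->fc_sparam.portName.u.wwn);

        /* convert back to u64, e.g. for fc_host_port_name() */
        wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
        (void)wwpn;
}
#endif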

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is no longer
         * overheated.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *) pmb->context1;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *) pmb->context1;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->context1 = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
                phba->cfg_hba_queue_depth =
                        (mb->un.varRdConfig.max_xri + 1) -
                                        lpfc_sli4_get_els_iocb_cnt(phba);

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->ring[psli->extra_ring].cmdringaddr)
                psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->fcp_ring].cmdringaddr)
                psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->next_ring].cmdringaddr)
                psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
                        KERN_ERR, LOG_INIT,
                        "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba,
                        KERN_ERR, LOG_INIT,
                        "2599 Adapter failed to issue DOWN_LINK"
                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}
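
/*
 * Illustrative sketch (hypothetical): the timer-arming idiom used at the
 * end of lpfc_config_port_post(). Timeouts are specified in seconds and
 * converted with HZ; mod_timer() both (re)arms an active timer and
 * re-activates an expired one.
 */
#if 0
static void lpfc_example_arm_timers(struct lpfc_hba *phba)
{
        uint32_t timeout = phba->fc_ratov * 2;          /* seconds */

        mod_timer(&phba->pport->els_tmofunc, jiffies + HZ * timeout);
        mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
        mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}
#endif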

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
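
/*
 * Illustrative sketch (hypothetical caller): delayed link bring-up via
 * lpfc_hba_init_link(), as done from lpfc_config_port_post() when
 * lpfc_suppress_link_up requests immediate initialization.
 */
#if 0
static int lpfc_example_bring_link_up(struct lpfc_hba *phba)
{
        /* non-blocking issue; completion runs lpfc_sli_def_mbox_cmpl */
        return lpfc_hba_init_link(phba, MBX_NOWAIT);
}
#endif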

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board:%d "
                        "Reset link speed to auto.\n",
                        phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0498 Adapter failed to init, mbxCmd x%x "
                        "INIT_LINK, mbxStatus x%x\n",
                        mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}
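
/*
 * Illustrative sketch (hypothetical): the link-speed validation idea from
 * lpfc_hba_init_link_fc_topology(), expressed table-driven instead of as
 * one large condition. phba->lmt is a bitmask of the speeds the board
 * supports; the helper name and table are invented for illustration.
 */
#if 0
static bool lpfc_example_speed_supported(struct lpfc_hba *phba)
{
        static const struct { uint32_t speed; uint32_t lmt; } map[] = {
                { LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
                { LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
                { LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
                { LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
                { LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
                { LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(map); i++)
                if (phba->cfg_link_speed == map[i].speed)
                        return (phba->lmt & map[i].lmt) != 0;

        /* auto or unknown: let the firmware negotiate */
        return phba->cfg_link_speed <= LPFC_USER_LINK_SPEED_MAX;
}
#endif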

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba,
                KERN_ERR, LOG_INIT,
                "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba,
                KERN_ERR, LOG_INIT,
                "2522 Adapter failed to issue DOWN_LINK"
                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}
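
/*
 * Illustrative sketch (hypothetical): the vport work-array iteration
 * pattern used by lpfc_hba_down_prep() and lpfc_hb_timeout_handler().
 * The array is a NULL-terminated snapshot with held references and must
 * always be released with lpfc_destroy_vport_work_array().
 */
#if 0
static void lpfc_example_for_each_vport(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_cleanup_discovery_resources(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
}
#endif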

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(completions);
        int i;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->ring[LPFC_ELS_RING];
                list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                        list_del(&mp->list);
                        pring->postbufq_cnt--;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
        }

        spin_lock_irq(&phba->hbalock);
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];

                /* At this point in time the HBA is either reset or DOA. Either
                 * way, nothing should be on txcmplq as it will NEVER complete.
                 */
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&phba->hbalock);

                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                                      IOERR_SLI_ABORTED);

                lpfc_sli_abort_iocb_ring(phba, pring);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);

        return 0;
}
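
/*
 * Illustrative sketch (hypothetical): the splice-then-cancel idiom from
 * lpfc_hba_down_post_s3(). The txcmplq is emptied onto a private list
 * under hbalock, then the iocbs are completed with an error status
 * outside the lock.
 */
#if 0
static void lpfc_example_flush_ring(struct lpfc_hba *phba,
                                    struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);

        spin_lock_irq(&phba->hbalock);
        list_splice_init(&pring->txcmplq, &completions);
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(&phba->hbalock);

        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
}
#endif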

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_scsi_buf *psb, *psb_next;
        LIST_HEAD(aborts);
        int ret;
        unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;

        ret = lpfc_hba_down_post_s3(phba);
        if (ret)
                return ret;
        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */
        spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
                                        /* scsi_buf_list */
        /* abts_sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_sgl_list);
        spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
        /* abts_scsi_buf_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
                        &aborts);
        spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        spin_unlock_irq(&phba->hbalock);

        list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_splice(&aborts, &phba->lpfc_scsi_buf_list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}
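
/*
 * Illustrative sketch (hypothetical): how the SLI3/SLI4 dispatch above is
 * typically wired up. During driver setup the per-revision routine is
 * stored in the lpfc_hba so common code stays revision-agnostic; the
 * helper name below is invented for illustration.
 */
#if 0
static void lpfc_example_setup_down_post(struct lpfc_hba *phba,
                                         uint32_t dev_grp)
{
        if (dev_grp == LPFC_PCI_DEV_OC)         /* SLI4-capable device */
                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
        else
                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
}
#endif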

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}
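
/*
 * Illustrative sketch (hypothetical): registering lpfc_hb_timeout() with
 * the pre-4.15 kernel timer API this driver uses, where the callback
 * receives the lpfc_hba pointer cast through an unsigned long.
 */
#if 0
static void lpfc_example_setup_hb_timer(struct lpfc_hba *phba)
{
        init_timer(&phba->hb_tmofunc);
        phba->hb_tmofunc.function = lpfc_hb_timeout;
        phba->hb_tmofunc.data = (unsigned long)phba;

        /* first expiry: LPFC_HB_MBOX_INTERVAL seconds from now */
        mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
}
#endif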

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
        lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver sets up the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * the heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hb_outstanding = 0;
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                        jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
        return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and the HBA-timeout event was posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding, the
 * HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *pmboxq;
        struct lpfc_dmabuf *buf_ptr;
        int retval, i;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_rcv_seq_check_edtov(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);

        if ((phba->link_state == LPFC_HBA_ERROR) ||
                (phba->pport->load_flag & FC_UNLOADING) ||
                (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return;

        spin_lock_irq(&phba->pport->work_port_lock);

        if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
                jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
                                jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
                else
                        mod_timer(&phba->hb_tmofunc,
                                jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);

        if (phba->elsbuf_cnt &&
                (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&phba->elsbuf, &completions);
                phba->elsbuf_cnt = 0;
                phba->elsbuf_prev_cnt = 0;
                spin_unlock_irq(&phba->hbalock);

                while (!list_empty(&completions)) {
                        list_remove_head(&completions, buf_ptr,
                                struct lpfc_dmabuf, list);
                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                        kfree(buf_ptr);
                }
        }
        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

        /* If there is no heart beat outstanding, issue a heartbeat command */
        if (phba->cfg_enable_hba_heartbeat) {
                if (!phba->hb_outstanding) {
                        if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
                                (list_empty(&psli->mboxq))) {
                                pmboxq = mempool_alloc(phba->mbox_mem_pool,
                                                        GFP_KERNEL);
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
                                                 HZ * LPFC_HB_MBOX_INTERVAL);
                                        return;
                                }

                                lpfc_heart_beat(phba, pmboxq);
                                pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
                                pmboxq->vport = phba->pport;
                                retval = lpfc_sli_issue_mbox(phba, pmboxq,
                                                MBX_NOWAIT);

                                if (retval != MBX_BUSY &&
                                        retval != MBX_SUCCESS) {
                                        mempool_free(pmboxq,
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
                                                HZ * LPFC_HB_MBOX_INTERVAL);
                                        return;
                                }
                                phba->skipped_hb = 0;
                                phba->hb_outstanding = 1;
                        } else if (time_before_eq(phba->last_completion_time,
                                        phba->skipped_hb)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "2857 Last completion time not "
                                        "updated in %d ms\n",
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        } else
                                phba->skipped_hb = jiffies;

                        mod_timer(&phba->hb_tmofunc,
                                  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
                        return;
                } else {
                        /*
                         * If heart beat timeout called with hb_outstanding set
                         * we need to give the hb mailbox cmd a chance to
                         * complete or TMO.
                         */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "0459 Adapter heartbeat still out"
                                        "standing: last compl time was %d ms.\n",
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
                                  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
                }
        }
}
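
/*
 * Illustrative sketch (hypothetical): the "recent completion" check at
 * the top of lpfc_hb_timeout_handler(). If any completion was seen
 * within the last heartbeat interval, the heartbeat mailbox is skipped
 * and the timer is simply re-armed.
 */
#if 0
static bool lpfc_example_hb_recently_active(struct lpfc_hba *phba)
{
        return time_after(phba->last_completion_time +
                          LPFC_HB_MBOX_INTERVAL * HZ, jiffies);
}
#endif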

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
        struct lpfc_sli   *psli = &phba->sli;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_prep(phba);

        lpfc_offline(phba);
        lpfc_reset_barrier(phba);
        spin_lock_irq(&phba->hbalock);
        lpfc_sli_brdreset(phba);
        spin_unlock_irq(&phba->hbalock);
        lpfc_hba_down_post(phba);
        lpfc_sli_brdready(phba, HS_MBRDY);
        lpfc_unblock_mgmt_io(phba);
        phba->link_state = LPFC_HBA_ERROR;
        return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
        lpfc_offline_prep(phba);
        lpfc_offline(phba);
        lpfc_sli4_brdreset(phba);
        lpfc_hba_down_post(phba);
        lpfc_sli4_post_status_check(phba);
        lpfc_unblock_mgmt_io(phba);
        phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
        uint32_t old_host_status = phba->work_hs;
        struct lpfc_sli_ring  *pring;
        struct lpfc_sli *psli = &phba->sli;

        /* If the pci channel is offline, ignore possible errors,
         * since we cannot communicate with the pci card anyway.
         */
        if (pci_channel_offline(phba->pcidev)) {
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~DEFER_ERATT;
                spin_unlock_irq(&phba->hbalock);
                return;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0479 Deferred Adapter Hardware Error "
                "Data: x%x x%x x%x\n",
                phba->work_hs,
                phba->work_status[0], phba->work_status[1]);

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);


        /*
         * Firmware stops when it triggered erratt. That could cause the I/Os
         * to be dropped by the firmware. Error out the iocbs (I/Os) on the
         * txcmplq and let the SCSI layer retry them after re-establishing
         * the link.
         */
        pring = &psli->ring[psli->fcp_ring];
        lpfc_sli_abort_iocb_ring(phba, pring);

        /*
         * There was a firmware error. Take the hba offline and then
         * attempt to restart it.
         */
        lpfc_offline_prep(phba);
        lpfc_offline(phba);

        /* Wait for the ER1 bit to clear.*/
        while (phba->work_hs & HS_FFER1) {
                msleep(100);
                if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
                        phba->work_hs = UNPLUG_ERR;
                        break;
                }
                /* If driver is unloading let the worker thread continue */
                if (phba->pport->load_flag & FC_UNLOADING) {
                        phba->work_hs = 0;
                        break;
                }
        }

        /*
         * This is to protect against a race condition in which the
         * first write to the host attention register clears the
         * host status register.
         */
        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
                phba->work_hs = old_host_status & ~HS_FFER1;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
        spin_unlock_irq(&phba->hbalock);
        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
        struct lpfc_board_event_header board_event;
        struct Scsi_Host *shost;

        board_event.event_type = FC_REG_BOARD_EVENT;
        board_event.subcategory = LPFC_EVENT_PORTINTERR;
        shost = lpfc_shost_from_vport(phba->pport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(board_event),
                                  (char *) &board_event,
                                  LPFC_NL_VENDOR_ID);
}
1299
1300 /**
1301  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1302  * @phba: pointer to lpfc hba data structure.
1303  *
1304  * This routine is invoked to handle the following HBA hardware error
1305  * conditions:
1306  * 1 - HBA error attention interrupt
1307  * 2 - DMA ring index out of range
1308  * 3 - Mailbox command came back as unknown
1309  **/
1310 static void
1311 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1312 {
1313         struct lpfc_vport *vport = phba->pport;
1314         struct lpfc_sli   *psli = &phba->sli;
1315         struct lpfc_sli_ring  *pring;
1316         uint32_t event_data;
1317         unsigned long temperature;
1318         struct temp_event temp_event_data;
1319         struct Scsi_Host  *shost;
1320
1321         /* If the pci channel is offline, ignore possible errors,
1322          * since we cannot communicate with the pci card anyway.
1323          */
1324         if (pci_channel_offline(phba->pcidev)) {
1325                 spin_lock_irq(&phba->hbalock);
1326                 phba->hba_flag &= ~DEFER_ERATT;
1327                 spin_unlock_irq(&phba->hbalock);
1328                 return;
1329         }
1330
1331         /* If resets are disabled then leave the HBA alone and return */
1332         if (!phba->cfg_enable_hba_reset)
1333                 return;
1334
1335         /* Send an internal error event to mgmt application */
1336         lpfc_board_errevt_to_mgmt(phba);
1337
1338         if (phba->hba_flag & DEFER_ERATT)
1339                 lpfc_handle_deferred_eratt(phba);
1340
1341         if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1342                 if (phba->work_hs & HS_FFER6)
1343                         /* Re-establishing Link */
1344                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1345                                         "1301 Re-establishing Link "
1346                                         "Data: x%x x%x x%x\n",
1347                                         phba->work_hs, phba->work_status[0],
1348                                         phba->work_status[1]);
1349                 if (phba->work_hs & HS_FFER8)
1350                         /* Device Zeroization */
1351                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1352                                         "2861 Host Authentication device "
1353                                         "zeroization Data:x%x x%x x%x\n",
1354                                         phba->work_hs, phba->work_status[0],
1355                                         phba->work_status[1]);
1356
1357                 spin_lock_irq(&phba->hbalock);
1358                 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1359                 spin_unlock_irq(&phba->hbalock);
1360
1361                 /*
1362                  * Firmware stops when it triggers the error attention with
1363                  * HS_FFER6, which may cause the firmware to drop I/Os.
1364                  * Error out the iocbs on the txcmplq and let the SCSI
1365                  * layer retry them after the link is re-established.
1366                  */
1367                 pring = &psli->ring[psli->fcp_ring];
1368                 lpfc_sli_abort_iocb_ring(phba, pring);
1369
1370                 /*
1371                  * There was a firmware error.  Take the hba offline and then
1372                  * attempt to restart it.
1373                  */
1374                 lpfc_offline_prep(phba);
1375                 lpfc_offline(phba);
1376                 lpfc_sli_brdrestart(phba);
1377                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1378                         lpfc_unblock_mgmt_io(phba);
1379                         return;
1380                 }
1381                 lpfc_unblock_mgmt_io(phba);
1382         } else if (phba->work_hs & HS_CRIT_TEMP) {
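                     /* Critical overtemp condition: read the adapter
                      * temperature from the SLIM area before taking the
                      * port offline.
                      */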
1383                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1384                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1385                 temp_event_data.event_code = LPFC_CRIT_TEMP;
1386                 temp_event_data.data = (uint32_t)temperature;
1387
1388                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1389                                 "0406 Adapter maximum temperature exceeded "
1390                                 "(%ld), taking this port offline "
1391                                 "Data: x%x x%x x%x\n",
1392                                 temperature, phba->work_hs,
1393                                 phba->work_status[0], phba->work_status[1]);
1394
1395                 shost = lpfc_shost_from_vport(phba->pport);
1396                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1397                                           sizeof(temp_event_data),
1398                                           (char *) &temp_event_data,
1399                                           SCSI_NL_VID_TYPE_PCI
1400                                           | PCI_VENDOR_ID_EMULEX);
1401
1402                 spin_lock_irq(&phba->hbalock);
1403                 phba->over_temp_state = HBA_OVER_TEMP;
1404                 spin_unlock_irq(&phba->hbalock);
1405                 lpfc_offline_eratt(phba);
1406
1407         } else {
1408                 /* The if clause above forces this code path when the status
1409                  * failure is a value other than FFER6 or FFER8. Do not call
1410                  * offline twice. This is the adapter hardware error path.
1411                  */
1412                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1413                                 "0457 Adapter Hardware Error "
1414                                 "Data: x%x x%x x%x\n",
1415                                 phba->work_hs,
1416                                 phba->work_status[0], phba->work_status[1]);
1417
1418                 event_data = FC_REG_DUMP_EVENT;
1419                 shost = lpfc_shost_from_vport(vport);
1420                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1421                                 sizeof(event_data), (char *) &event_data,
1422                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1423
1424                 lpfc_offline_eratt(phba);
1425         }
1426         return;
1427 }
1428
1429 /**
1430  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1431  * @phba: pointer to lpfc hba data structure.
1432  *
1433  * This routine is invoked to handle the SLI4 HBA hardware error attention
1434  * conditions.
1435  **/
1436 static void
1437 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1438 {
1439         struct lpfc_vport *vport = phba->pport;
1440         uint32_t event_data;
1441         struct Scsi_Host *shost;
1442         uint32_t if_type;
1443         struct lpfc_register portstat_reg = {0};
1444         uint32_t reg_err1, reg_err2;
1445         uint32_t uerrlo_reg, uemasklo_reg;
1446         uint32_t pci_rd_rc1, pci_rd_rc2;
1447         int rc;
1448
1449         /* If the pci channel is offline, ignore possible errors, since
1450          * we cannot communicate with the pci card anyway.
1451          */
1452         if (pci_channel_offline(phba->pcidev))
1453                 return;
1454         /* If resets are disabled then leave the HBA alone and return */
1455         if (!phba->cfg_enable_hba_reset)
1456                 return;
1457
1458         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1459         switch (if_type) {
1460         case LPFC_SLI_INTF_IF_TYPE_0:
1461                 pci_rd_rc1 = lpfc_readl(
1462                                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1463                                 &uerrlo_reg);
1464                 pci_rd_rc2 = lpfc_readl(
1465                                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1466                                 &uemasklo_reg);
1467                 /* consider PCI bus read error as pci_channel_offline */
1468                 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1469                         return;
1470                 lpfc_sli4_offline_eratt(phba);
1471                 break;
1472         case LPFC_SLI_INTF_IF_TYPE_2:
1473                 pci_rd_rc1 = lpfc_readl(
1474                                 phba->sli4_hba.u.if_type2.STATUSregaddr,
1475                                 &portstat_reg.word0);
1476                 /* consider PCI bus read error as pci_channel_offline */
1477                 if (pci_rd_rc1 == -EIO)
1478                         return;
1479                 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1480                 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1481                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1482                         /* TODO: Register for Overtemp async events. */
1483                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1484                                 "2889 Port Overtemperature event, "
1485                                 "taking port offline\n");
1486                         spin_lock_irq(&phba->hbalock);
1487                         phba->over_temp_state = HBA_OVER_TEMP;
1488                         spin_unlock_irq(&phba->hbalock);
1489                         lpfc_sli4_offline_eratt(phba);
1490                         break;
1491                 }
1492                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1493                     reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
1494                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1495                                         "3143 Port Down: Firmware Restarted\n");
1496                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1497                          reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1498                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1499                                         "3144 Port Down: Debug Dump\n");
1500                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1501                          reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1502                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1503                                         "3145 Port Down: Provisioning\n");
1504                 /*
1505                  * On an error status condition, the driver needs to wait
1506                  * for the port to become ready before performing a reset.
1507                  */
1508                 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1509                 if (!rc) {
1510                         /* need reset: attempt for port recovery */
1511                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1512                                         "2887 Reset Needed: Attempting Port "
1513                                         "Recovery...\n");
1514                         lpfc_offline_prep(phba);
1515                         lpfc_offline(phba);
1516                         lpfc_sli_brdrestart(phba);
1517                         if (lpfc_online(phba) == 0) {
1518                                 lpfc_unblock_mgmt_io(phba);
1519                                 /* don't report event on forced debug dump */
1520                                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1521                                     reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1522                                         return;
1523                                 else
1524                                         break;
1525                         }
1526                         /* fall through when the port could not be recovered */
1527                 }
1528                 lpfc_sli4_offline_eratt(phba);
1529                 break;
1530         case LPFC_SLI_INTF_IF_TYPE_1:
1531         default:
1532                 break;
1533         }
1534         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1535                         "3123 Report dump event to upper layer\n");
1536         /* Send an internal error event to mgmt application */
1537         lpfc_board_errevt_to_mgmt(phba);
1538
1539         event_data = FC_REG_DUMP_EVENT;
1540         shost = lpfc_shost_from_vport(vport);
1541         fc_host_post_vendor_event(shost, fc_get_event_number(),
1542                                   sizeof(event_data), (char *) &event_data,
1543                                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1544 }
1545
1546 /**
1547  * lpfc_handle_eratt - Wrapper func for handling hba error attention
1548  * @phba: pointer to lpfc HBA data structure.
1549  *
1550  * This routine invokes the actual SLI3 or SLI4 hba error attention
1551  * handling routine through the API jump table function pointer set up
1552  * in the lpfc_hba struct.
1553  **/
1557 void
1558 lpfc_handle_eratt(struct lpfc_hba *phba)
1559 {
1560         (*phba->lpfc_handle_eratt)(phba);
1561 }
1562
1563 /**
1564  * lpfc_handle_latt - The HBA link event handler
1565  * @phba: pointer to lpfc hba data structure.
1566  *
1567  * This routine is invoked from the worker thread to handle a HBA host
1568  * attention link event.
1569  **/
1570 void
1571 lpfc_handle_latt(struct lpfc_hba *phba)
1572 {
1573         struct lpfc_vport *vport = phba->pport;
1574         struct lpfc_sli   *psli = &phba->sli;
1575         LPFC_MBOXQ_t *pmb;
1576         volatile uint32_t control;
1577         struct lpfc_dmabuf *mp;
1578         int rc = 0;
1579
1580         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1581         if (!pmb) {
1582                 rc = 1;
1583                 goto lpfc_handle_latt_err_exit;
1584         }
1585
1586         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1587         if (!mp) {
1588                 rc = 2;
1589                 goto lpfc_handle_latt_free_pmb;
1590         }
1591
1592         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1593         if (!mp->virt) {
1594                 rc = 3;
1595                 goto lpfc_handle_latt_free_mp;
1596         }
1597
1598         /* Cleanup any outstanding ELS commands */
1599         lpfc_els_flush_all_cmd(phba);
1600
1601         psli->slistat.link_event++;
1602         lpfc_read_topology(phba, pmb, mp);
1603         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1604         pmb->vport = vport;
1605         /* Block ELS IOCBs until we have processed this mbox command */
1606         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1607         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1608         if (rc == MBX_NOT_FINISHED) {
1609                 rc = 4;
1610                 goto lpfc_handle_latt_free_mbuf;
1611         }
1612
1613         /* Clear Link Attention in HA REG */
1614         spin_lock_irq(&phba->hbalock);
1615         writel(HA_LATT, phba->HAregaddr);
1616         readl(phba->HAregaddr); /* flush */
1617         spin_unlock_irq(&phba->hbalock);
1618
1619         return;
1620
1621 lpfc_handle_latt_free_mbuf:
1622         phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1623         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1624 lpfc_handle_latt_free_mp:
1625         kfree(mp);
1626 lpfc_handle_latt_free_pmb:
1627         mempool_free(pmb, phba->mbox_mem_pool);
1628 lpfc_handle_latt_err_exit:
1629         /* Enable Link attention interrupts */
1630         spin_lock_irq(&phba->hbalock);
1631         psli->sli_flag |= LPFC_PROCESS_LA;
1632         control = readl(phba->HCregaddr);
1633         control |= HC_LAINT_ENA;
1634         writel(control, phba->HCregaddr);
1635         readl(phba->HCregaddr); /* flush */
1636
1637         /* Clear Link Attention in HA REG */
1638         writel(HA_LATT, phba->HAregaddr);
1639         readl(phba->HAregaddr); /* flush */
1640         spin_unlock_irq(&phba->hbalock);
1641         lpfc_linkdown(phba);
1642         phba->link_state = LPFC_HBA_ERROR;
1643
1644         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1645                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1646
1647         return;
1648 }
1649
1650 /**
1651  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1652  * @phba: pointer to lpfc hba data structure.
1653  * @vpd: pointer to the vital product data.
1654  * @len: length of the vital product data in bytes.
1655  *
1656  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1657  * an array of characters. In this routine, fields of the phba data
1658  * structure such as ModelName, ProgramType, and ModelDesc will be populated.
1659  *
1660  * Return codes
1661  *   0 - pointer to the VPD passed in is NULL
1662  *   1 - success
1663  **/
1664 int
1665 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1666 {
1667         uint8_t lenlo, lenhi;
1668         int Length;
1669         int i, j;
1670         int finished = 0;
1671         int index = 0;
1672
1673         if (!vpd)
1674                 return 0;
1675
1676         /* Vital Product */
1677         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1678                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
1679                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1680                         (uint32_t) vpd[3]);
1681         while (!finished && (index < (len - 4))) {
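                     /* Walk the PCI VPD resource list: tag 0x82 is the
                      * Identifier String and 0x91 is VPD-W (both skipped
                      * here), tag 0x90 is the VPD-R area holding the keyword
                      * fields parsed below, and tag 0x78 is the End Tag.
                      */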
1682                 switch (vpd[index]) {
1683                 case 0x82:
1684                 case 0x91:
1685                         index += 1;
1686                         lenlo = vpd[index];
1687                         index += 1;
1688                         lenhi = vpd[index];
1689                         index += 1;
1690                         i = ((((unsigned short)lenhi) << 8) + lenlo);
1691                         index += i;
1692                         break;
1693                 case 0x90:
1694                         index += 1;
1695                         lenlo = vpd[index];
1696                         index += 1;
1697                         lenhi = vpd[index];
1698                         index += 1;
1699                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
1700                         if (Length > len - index)
1701                                 Length = len - index;
1702                         while (Length > 0) {
1703                                 /* Look for Serial Number */
1704                                 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1705                                         index += 2;
1706                                         i = vpd[index];
1707                                         index += 1;
1708                                         j = 0;
1709                                         Length -= (3+i);
1710                                         while (i--) {
1711                                                 phba->SerialNumber[j++] = vpd[index++];
1712                                                 if (j == 31)
1713                                                         break;
1714                                         }
1715                                         phba->SerialNumber[j] = 0;
1716                                         continue;
1717                                 } else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1719                                         phba->vpd_flag |= VPD_MODEL_DESC;
1720                                         index += 2;
1721                                         i = vpd[index];
1722                                         index += 1;
1723                                         j = 0;
1724                                         Length -= (3+i);
1725                                         while (i--) {
1726                                                 phba->ModelDesc[j++] = vpd[index++];
1727                                                 if (j == 255)
1728                                                         break;
1729                                         }
1730                                         phba->ModelDesc[j] = 0;
1731                                         continue;
1732                                 } else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1734                                         phba->vpd_flag |= VPD_MODEL_NAME;
1735                                         index += 2;
1736                                         i = vpd[index];
1737                                         index += 1;
1738                                         j = 0;
1739                                         Length -= (3+i);
1740                                         while (i--) {
1741                                                 phba->ModelName[j++] = vpd[index++];
1742                                                 if (j == 79)
1743                                                         break;
1744                                         }
1745                                         phba->ModelName[j] = 0;
1746                                         continue;
1747                                 } else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1749                                         phba->vpd_flag |= VPD_PROGRAM_TYPE;
1750                                         index += 2;
1751                                         i = vpd[index];
1752                                         index += 1;
1753                                         j = 0;
1754                                         Length -= (3+i);
1755                                         while (i--) {
1756                                                 phba->ProgramType[j++] = vpd[index++];
1757                                                 if (j == 255)
1758                                                         break;
1759                                         }
1760                                         phba->ProgramType[j] = 0;
1761                                         continue;
1762                                 } else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1764                                         phba->vpd_flag |= VPD_PORT;
1765                                         index += 2;
1766                                         i = vpd[index];
1767                                         index += 1;
1768                                         j = 0;
1769                                         Length -= (3+i);
1770                                         while (i--) {
1771                                                 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1772                                                     (phba->sli4_hba.pport_name_sta ==
1773                                                      LPFC_SLI4_PPNAME_GET)) {
1774                                                         j++;
1775                                                         index++;
1776                                                 } else
1777                                                         phba->Port[j++] = vpd[index++];
1778                                                 if (j == 19)
1779                                                         break;
1780                                         }
1781                                         if ((phba->sli_rev != LPFC_SLI_REV4) ||
1782                                             (phba->sli4_hba.pport_name_sta ==
1783                                              LPFC_SLI4_PPNAME_NON))
1784                                                 phba->Port[j] = 0;
1785                                         continue;
1786                                 } else {
1788                                         index += 2;
1789                                         i = vpd[index];
1790                                         index += 1;
1791                                         index += i;
1792                                         Length -= (3 + i);
1793                                 }
1794                         }
1795                         finished = 0;
1796                         break;
1797                 case 0x78:
1798                         finished = 1;
1799                         break;
1800                 default:
1801                         index++;
1802                         break;
1803                 }
1804         }
1805
1806         return 1;
1807 }
1808
1809 /**
1810  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1811  * @phba: pointer to lpfc hba data structure.
1812  * @mdp: pointer to the data structure to hold the derived model name.
1813  * @descp: pointer to the data structure to hold the derived description.
1814  *
1815  * This routine retrieves HBA's description based on its registered PCI device
1816  * ID. The @descp passed into this function points to an array of 256 chars. It
1817  * shall be returned with the model name, maximum speed, and the host bus type.
1818  * The @mdp passed into this function points to an array of 80 chars. When the
1819  * function returns, the @mdp will be filled with the model name.
1820  **/
1821 static void
1822 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1823 {
1824         lpfc_vpd_t *vp;
1825         uint16_t dev_id = phba->pcidev->device;
1826         int max_speed;
1827         int GE = 0;
1828         int oneConnect = 0; /* default is not a oneConnect */
1829         struct {
1830                 char *name;
1831                 char *bus;
1832                 char *function;
1833         } m = {"<Unknown>", "", ""};
1834
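             /* If both the model name and description are already populated,
              * leave them as is.
              */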
1835         if (mdp && mdp[0] != '\0'
1836                 && descp && descp[0] != '\0')
1837                 return;
1838
1839         if (phba->lmt & LMT_16Gb)
1840                 max_speed = 16;
1841         else if (phba->lmt & LMT_10Gb)
1842                 max_speed = 10;
1843         else if (phba->lmt & LMT_8Gb)
1844                 max_speed = 8;
1845         else if (phba->lmt & LMT_4Gb)
1846                 max_speed = 4;
1847         else if (phba->lmt & LMT_2Gb)
1848                 max_speed = 2;
1849         else
1850                 max_speed = 1;
1851
1852         vp = &phba->vpd;
1853
1854         switch (dev_id) {
1855         case PCI_DEVICE_ID_FIREFLY:
1856                 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1857                 break;
1858         case PCI_DEVICE_ID_SUPERFLY:
1859                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1860                         m = (typeof(m)){"LP7000", "PCI",
1861                                         "Fibre Channel Adapter"};
1862                 else
1863                         m = (typeof(m)){"LP7000E", "PCI",
1864                                         "Fibre Channel Adapter"};
1865                 break;
1866         case PCI_DEVICE_ID_DRAGONFLY:
1867                 m = (typeof(m)){"LP8000", "PCI",
1868                                 "Fibre Channel Adapter"};
1869                 break;
1870         case PCI_DEVICE_ID_CENTAUR:
1871                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1872                         m = (typeof(m)){"LP9002", "PCI",
1873                                         "Fibre Channel Adapter"};
1874                 else
1875                         m = (typeof(m)){"LP9000", "PCI",
1876                                         "Fibre Channel Adapter"};
1877                 break;
1878         case PCI_DEVICE_ID_RFLY:
1879                 m = (typeof(m)){"LP952", "PCI",
1880                                 "Fibre Channel Adapter"};
1881                 break;
1882         case PCI_DEVICE_ID_PEGASUS:
1883                 m = (typeof(m)){"LP9802", "PCI-X",
1884                                 "Fibre Channel Adapter"};
1885                 break;
1886         case PCI_DEVICE_ID_THOR:
1887                 m = (typeof(m)){"LP10000", "PCI-X",
1888                                 "Fibre Channel Adapter"};
1889                 break;
1890         case PCI_DEVICE_ID_VIPER:
1891                 m = (typeof(m)){"LPX1000",  "PCI-X",
1892                                 "Fibre Channel Adapter"};
1893                 break;
1894         case PCI_DEVICE_ID_PFLY:
1895                 m = (typeof(m)){"LP982", "PCI-X",
1896                                 "Fibre Channel Adapter"};
1897                 break;
1898         case PCI_DEVICE_ID_TFLY:
1899                 m = (typeof(m)){"LP1050", "PCI-X",
1900                                 "Fibre Channel Adapter"};
1901                 break;
1902         case PCI_DEVICE_ID_HELIOS:
1903                 m = (typeof(m)){"LP11000", "PCI-X2",
1904                                 "Fibre Channel Adapter"};
1905                 break;
1906         case PCI_DEVICE_ID_HELIOS_SCSP:
1907                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1908                                 "Fibre Channel Adapter"};
1909                 break;
1910         case PCI_DEVICE_ID_HELIOS_DCSP:
1911                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1912                                 "Fibre Channel Adapter"};
1913                 break;
1914         case PCI_DEVICE_ID_NEPTUNE:
1915                 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1916                 break;
1917         case PCI_DEVICE_ID_NEPTUNE_SCSP:
1918                 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1919                 break;
1920         case PCI_DEVICE_ID_NEPTUNE_DCSP:
1921                 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1922                 break;
1923         case PCI_DEVICE_ID_BMID:
1924                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1925                 break;
1926         case PCI_DEVICE_ID_BSMB:
1927                 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1928                 break;
1929         case PCI_DEVICE_ID_ZEPHYR:
1930                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1931                 break;
1932         case PCI_DEVICE_ID_ZEPHYR_SCSP:
1933                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1934                 break;
1935         case PCI_DEVICE_ID_ZEPHYR_DCSP:
1936                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1937                 GE = 1;
1938                 break;
1939         case PCI_DEVICE_ID_ZMID:
1940                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1941                 break;
1942         case PCI_DEVICE_ID_ZSMB:
1943                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1944                 break;
1945         case PCI_DEVICE_ID_LP101:
1946                 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1947                 break;
1948         case PCI_DEVICE_ID_LP10000S:
1949                 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1950                 break;
1951         case PCI_DEVICE_ID_LP11000S:
1952                 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1953                 break;
1954         case PCI_DEVICE_ID_LPE11000S:
1955                 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1956                 break;
1957         case PCI_DEVICE_ID_SAT:
1958                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1959                 break;
1960         case PCI_DEVICE_ID_SAT_MID:
1961                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1962                 break;
1963         case PCI_DEVICE_ID_SAT_SMB:
1964                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1965                 break;
1966         case PCI_DEVICE_ID_SAT_DCSP:
1967                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1968                 break;
1969         case PCI_DEVICE_ID_SAT_SCSP:
1970                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1971                 break;
1972         case PCI_DEVICE_ID_SAT_S:
1973                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1974                 break;
1975         case PCI_DEVICE_ID_HORNET:
1976                 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1977                 GE = 1;
1978                 break;
1979         case PCI_DEVICE_ID_PROTEUS_VF:
1980                 m = (typeof(m)){"LPev12000", "PCIe IOV",
1981                                 "Fibre Channel Adapter"};
1982                 break;
1983         case PCI_DEVICE_ID_PROTEUS_PF:
1984                 m = (typeof(m)){"LPev12000", "PCIe IOV",
1985                                 "Fibre Channel Adapter"};
1986                 break;
1987         case PCI_DEVICE_ID_PROTEUS_S:
1988                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1989                                 "Fibre Channel Adapter"};
1990                 break;
1991         case PCI_DEVICE_ID_TIGERSHARK:
1992                 oneConnect = 1;
1993                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1994                 break;
1995         case PCI_DEVICE_ID_TOMCAT:
1996                 oneConnect = 1;
1997                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1998                 break;
1999         case PCI_DEVICE_ID_FALCON:
2000                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2001                                 "EmulexSecure Fibre"};
2002                 break;
2003         case PCI_DEVICE_ID_BALIUS:
2004                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2005                                 "Fibre Channel Adapter"};
2006                 break;
2007         case PCI_DEVICE_ID_LANCER_FC:
2008         case PCI_DEVICE_ID_LANCER_FC_VF:
2009                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2010                 break;
2011         case PCI_DEVICE_ID_LANCER_FCOE:
2012         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2013                 oneConnect = 1;
2014                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2015                 break;
2016         default:
2017                 m = (typeof(m)){"Unknown", "", ""};
2018                 break;
2019         }
2020
2021         if (mdp && mdp[0] == '\0')
2022                 snprintf(mdp, 79, "%s", m.name);
2023         /*
2024          * OneConnect HBAs require special processing; they are all
2025          * initiators and we put the port number on the end.
2026          */
2027         if (descp && descp[0] == '\0') {
2028                 if (oneConnect)
2029                         snprintf(descp, 255,
2030                                 "Emulex OneConnect %s, %s Initiator, Port %s",
2031                                 m.name, m.function,
2032                                 phba->Port);
2033                 else
2034                         snprintf(descp, 255,
2035                                 "Emulex %s %d%s %s %s",
2036                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2037                                 m.bus, m.function);
2038         }
2039 }
2040
2041 /**
2042  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2043  * @phba: pointer to lpfc hba data structure.
2044  * @pring: pointer to an IOCB ring.
2045  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2046  *
2047  * This routine posts a given number of IOCBs with the associated DMA buffer
2048  * descriptors specified by the cnt argument to the given IOCB ring.
2049  *
2050  * Return codes
2051  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2052  **/
2053 int
2054 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2055 {
2056         IOCB_t *icmd;
2057         struct lpfc_iocbq *iocb;
2058         struct lpfc_dmabuf *mp1, *mp2;
2059
2060         cnt += pring->missbufcnt;
2061
2062         /* While there are buffers to post */
2063         while (cnt > 0) {
2064                 /* Allocate buffer for command iocb */
2065                 iocb = lpfc_sli_get_iocbq(phba);
2066                 if (iocb == NULL) {
2067                         pring->missbufcnt = cnt;
2068                         return cnt;
2069                 }
2070                 icmd = &iocb->iocb;
2071
2072                 /* 2 buffers can be posted per command */
2073                 /* Allocate buffer to post */
2074                 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2075                 if (mp1)
2076                         mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2077                 if (!mp1 || !mp1->virt) {
2078                         kfree(mp1);
2079                         lpfc_sli_release_iocbq(phba, iocb);
2080                         pring->missbufcnt = cnt;
2081                         return cnt;
2082                 }
2083
2084                 INIT_LIST_HEAD(&mp1->list);
2085                 /* Allocate buffer to post */
2086                 if (cnt > 1) {
2087                         mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2088                         if (mp2)
2089                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2090                                                             &mp2->phys);
2091                         if (!mp2 || !mp2->virt) {
2092                                 kfree(mp2);
2093                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2094                                 kfree(mp1);
2095                                 lpfc_sli_release_iocbq(phba, iocb);
2096                                 pring->missbufcnt = cnt;
2097                                 return cnt;
2098                         }
2099
2100                         INIT_LIST_HEAD(&mp2->list);
2101                 } else {
2102                         mp2 = NULL;
2103                 }
2104
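                     /* Set up the 64-bit buffer descriptor entries for the
                      * buffer(s) being posted.
                      */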
2105                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2106                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2107                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2108                 icmd->ulpBdeCount = 1;
2109                 cnt--;
2110                 if (mp2) {
2111                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2112                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2113                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2114                         cnt--;
2115                         icmd->ulpBdeCount = 2;
2116                 }
2117
2118                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2119                 icmd->ulpLe = 1;
2120
2121                 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2122                     IOCB_ERROR) {
2123                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2124                         kfree(mp1);
2125                         cnt++;
2126                         if (mp2) {
2127                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2128                                 kfree(mp2);
2129                                 cnt++;
2130                         }
2131                         lpfc_sli_release_iocbq(phba, iocb);
2132                         pring->missbufcnt = cnt;
2133                         return cnt;
2134                 }
2135                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2136                 if (mp2)
2137                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2138         }
2139         pring->missbufcnt = 0;
2140         return 0;
2141 }
2142
2143 /**
2144  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2145  * @phba: pointer to lpfc hba data structure.
2146  *
2147  * This routine posts initial receive IOCB buffers to the ELS ring. The
2148  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2149  * set to 64 IOCBs.
2150  *
2151  * Return codes
2152  *   0 - success (currently always success)
2153  **/
2154 static int
2155 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2156 {
2157         struct lpfc_sli *psli = &phba->sli;
2158
2159         /* Ring 0, ELS / CT buffers */
2160         lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2161         /* Ring 2 - FCP no buffers needed */
2162
2163         return 0;
2164 }
2165
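     /* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 helper) */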
2166 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2167
2168 /**
2169  * lpfc_sha_init - Set up initial array of hash table entries
2170  * @HashResultPointer: pointer to an array as hash table.
2171  *
2172  * This routine sets up the initial values to the array of hash table entries
2173  * for the LC HBAs.
2174  **/
2175 static void
2176 lpfc_sha_init(uint32_t *HashResultPointer)
2177 {
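             /* Standard SHA-1 initial hash values (FIPS 180-1) */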
2178         HashResultPointer[0] = 0x67452301;
2179         HashResultPointer[1] = 0xEFCDAB89;
2180         HashResultPointer[2] = 0x98BADCFE;
2181         HashResultPointer[3] = 0x10325476;
2182         HashResultPointer[4] = 0xC3D2E1F0;
2183 }
2184
2185 /**
2186  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2187  * @HashResultPointer: pointer to an initial/result hash table.
2188  * @HashWorkingPointer: pointer to a working hash table.
2189  *
2190  * This routine iterates an initial hash table pointed to by
2191  * @HashResultPointer with the values from the working hash table pointed
2192  * to by @HashWorkingPointer. The results are put back into the initial
2193  * hash table, returned through @HashResultPointer as the result hash table.
2194  **/
2195 static void
2196 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2197 {
2198         int t;
2199         uint32_t TEMP;
2200         uint32_t A, B, C, D, E;
2201         t = 16;
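             /* Expand the 16 seed words into the 80-word SHA-1 message schedule */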
2202         do {
2203                 HashWorkingPointer[t] =
2204                     S(1,
2205                       HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2206                                                                      8] ^
2207                       HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2208         } while (++t <= 79);
2209         t = 0;
2210         A = HashResultPointer[0];
2211         B = HashResultPointer[1];
2212         C = HashResultPointer[2];
2213         D = HashResultPointer[3];
2214         E = HashResultPointer[4];
2215
2216         do {
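                     /* SHA-1 round function: the boolean function and additive
                      * constant change every 20 rounds.
                      */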
2217                 if (t < 20) {
2218                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2219                 } else if (t < 40) {
2220                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2221                 } else if (t < 60) {
2222                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2223                 } else {
2224                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2225                 }
2226                 TEMP += S(5, A) + E + HashWorkingPointer[t];
2227                 E = D;
2228                 D = C;
2229                 C = S(30, B);
2230                 B = A;
2231                 A = TEMP;
2232         } while (++t <= 79);
2233
2234         HashResultPointer[0] += A;
2235         HashResultPointer[1] += B;
2236         HashResultPointer[2] += C;
2237         HashResultPointer[3] += D;
2238         HashResultPointer[4] += E;
2239
2240 }
2241
2242 /**
2243  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2244  * @RandomChallenge: pointer to the entry of host challenge random number array.
2245  * @HashWorking: pointer to the entry of the working hash array.
2246  *
2247  * This routine calculates the working hash array referred by @HashWorking
2248  * from the challenge random numbers associated with the host, referred by
2249  * @RandomChallenge. The result is put into the entry of the working hash
2250  * array and returned by reference through @HashWorking.
2251  **/
2252 static void
2253 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2254 {
2255         *HashWorking = (*RandomChallenge ^ *HashWorking);
2256 }
2257
2258 /**
2259  * lpfc_hba_init - Perform special handling for LC HBA initialization
2260  * @phba: pointer to lpfc hba data structure.
2261  * @hbainit: pointer to an array of unsigned 32-bit integers.
2262  *
2263  * This routine performs the special handling for LC HBA initialization.
2264  **/
2265 void
2266 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2267 {
2268         int t;
2269         uint32_t *HashWorking;
2270         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2271
2272         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2273         if (!HashWorking)
2274                 return;
2275
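             /* Seed schedule words 0/78 and 1/79 with the two WWNN words */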
2276         HashWorking[0] = HashWorking[78] = *pwwnn++;
2277         HashWorking[1] = HashWorking[79] = *pwwnn;
2278
2279         for (t = 0; t < 7; t++)
2280                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2281
2282         lpfc_sha_init(hbainit);
2283         lpfc_sha_iterate(hbainit, HashWorking);
2284         kfree(HashWorking);
2285 }
2286
2287 /**
2288  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2289  * @vport: pointer to a virtual N_Port data structure.
2290  *
2291  * This routine performs the necessary cleanups before deleting the @vport.
2292  * It invokes the discovery state machine to perform necessary state
2293  * transitions and to release the ndlps associated with the @vport. Note,
2294  * the physical port is treated as @vport 0.
2295  **/
2296 void
2297 lpfc_cleanup(struct lpfc_vport *vport)
2298 {
2299         struct lpfc_hba   *phba = vport->phba;
2300         struct lpfc_nodelist *ndlp, *next_ndlp;
2301         int i = 0;
2302
2303         if (phba->link_state > LPFC_LINK_DOWN)
2304                 lpfc_port_link_failure(vport);
2305
2306         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2307                 if (!NLP_CHK_NODE_ACT(ndlp)) {
2308                         ndlp = lpfc_enable_node(vport, ndlp,
2309                                                 NLP_STE_UNUSED_NODE);
2310                         if (!ndlp)
2311                                 continue;
2312                         spin_lock_irq(&phba->ndlp_lock);
2313                         NLP_SET_FREE_REQ(ndlp);
2314                         spin_unlock_irq(&phba->ndlp_lock);
2315                         /* Trigger the release of the ndlp memory */
2316                         lpfc_nlp_put(ndlp);
2317                         continue;
2318                 }
2319                 spin_lock_irq(&phba->ndlp_lock);
2320                 if (NLP_CHK_FREE_REQ(ndlp)) {
2321                         /* The ndlp should not be in memory free mode already */
2322                         spin_unlock_irq(&phba->ndlp_lock);
2323                         continue;
2324                 } else
2325                         /* Indicate request for freeing ndlp memory */
2326                         NLP_SET_FREE_REQ(ndlp);
2327                 spin_unlock_irq(&phba->ndlp_lock);
2328
2329                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2330                     ndlp->nlp_DID == Fabric_DID) {
2331                         /* Just free up ndlp with Fabric_DID for vports */
2332                         lpfc_nlp_put(ndlp);
2333                         continue;
2334                 }
2335
2336                 if (ndlp->nlp_type & NLP_FABRIC)
2337                         lpfc_disc_state_machine(vport, ndlp, NULL,
2338                                         NLP_EVT_DEVICE_RECOVERY);
2339
2340                 lpfc_disc_state_machine(vport, ndlp, NULL,
2341                                              NLP_EVT_DEVICE_RM);
2342
2343         }
2344
2345         /* At this point, ALL ndlp's should be gone
2346          * because of the previous NLP_EVT_DEVICE_RM.
2347          * Lets wait for this to happen, if needed.
2348          */
2349         while (!list_empty(&vport->fc_nodes)) {
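                     /* Poll every 10 ms; after roughly 30 seconds give up and
                      * log any nodes still on the list.
                      */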
2350                 if (i++ > 3000) {
2351                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2352                                 "0233 Nodelist not empty\n");
2353                         list_for_each_entry_safe(ndlp, next_ndlp,
2354                                                 &vport->fc_nodes, nlp_listp) {
2355                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2356                                                 LOG_NODE,
2357                                                 "0282 did:x%x ndlp:x%p "
2358                                                 "usgmap:x%x refcnt:%d\n",
2359                                                 ndlp->nlp_DID, (void *)ndlp,
2360                                                 ndlp->nlp_usg_map,
2361                                                 atomic_read(
2362                                                         &ndlp->kref.refcount));
2363                         }
2364                         break;
2365                 }
2366
2367                 /* Wait for any activity on ndlps to settle */
2368                 msleep(10);
2369         }
2370         lpfc_cleanup_vports_rrqs(vport, NULL);
2371 }
2372
2373 /**
2374  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2375  * @vport: pointer to a virtual N_Port data structure.
2376  *
2377  * This routine stops all the timers associated with a @vport. This function
2378  * is invoked before disabling or deleting a @vport. Note that the physical
2379  * port is treated as @vport 0.
2380  **/
2381 void
2382 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2383 {
2384         del_timer_sync(&vport->els_tmofunc);
2385         del_timer_sync(&vport->fc_fdmitmo);
2386         del_timer_sync(&vport->delayed_disc_tmo);
2387         lpfc_can_disctmo(vport);
2388         return;
2389 }
2390
2391 /**
2392  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2393  * @phba: pointer to lpfc hba data structure.
2394  *
2395  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2396  * caller of this routine should already hold the host lock.
2397  **/
2398 void
2399 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2400 {
2401         /* Clear pending FCF rediscovery wait flag */
2402         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2403
2404         /* Now, try to stop the timer */
2405         del_timer(&phba->fcf.redisc_wait);
2406 }
2407
2408 /**
2409  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2410  * @phba: pointer to lpfc hba data structure.
2411  *
2412  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2413  * checks whether the FCF rediscovery wait timer is pending with the host
2414  * lock held before proceeding with disabling the timer and clearing the
2415  * wait timer pending flag.
2416  **/
2417 void
2418 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2419 {
2420         spin_lock_irq(&phba->hbalock);
2421         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2422                 /* FCF rediscovery timer already fired or stopped */
2423                 spin_unlock_irq(&phba->hbalock);
2424                 return;
2425         }
2426         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2427         /* Clear failover in progress flags */
2428         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2429         spin_unlock_irq(&phba->hbalock);
2430 }
2431
2432 /**
2433  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2434  * @phba: pointer to lpfc hba data structure.
2435  *
2436  * This routine stops all the timers associated with a HBA. This function is
2437  * invoked before either putting a HBA offline or unloading the driver.
2438  **/
2439 void
2440 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2441 {
2442         lpfc_stop_vport_timers(phba->pport);
2443         del_timer_sync(&phba->sli.mbox_tmo);
2444         del_timer_sync(&phba->fabric_block_timer);
2445         del_timer_sync(&phba->eratt_poll);
2446         del_timer_sync(&phba->hb_tmofunc);
2447         if (phba->sli_rev == LPFC_SLI_REV4) {
2448                 del_timer_sync(&phba->rrq_tmr);
2449                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2450         }
2451         phba->hb_outstanding = 0;
2452
2453         switch (phba->pci_dev_grp) {
2454         case LPFC_PCI_DEV_LP:
2455                 /* Stop any LightPulse device specific driver timers */
2456                 del_timer_sync(&phba->fcp_poll_timer);
2457                 break;
2458         case LPFC_PCI_DEV_OC:
2459                 /* Stop any OneConnect device specific driver timers */
2460                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2461                 break;
2462         default:
2463                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2464                                 "0297 Invalid device group (x%x)\n",
2465                                 phba->pci_dev_grp);
2466                 break;
2467         }
2468         return;
2469 }
2470
2471 /**
2472  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2473  * @phba: pointer to lpfc hba data structure.
2474  *
2475  * This routine marks a HBA's management interface as blocked. Once the HBA's
2476  * management interface is marked as blocked, all user space access to
2477  * the HBA, whether from the sysfs interface or the libdfc interface, will
2478  * be blocked. The HBA is set to block the management interface when the
2479  * driver prepares the HBA interface for online or offline.
2480  **/
2481 static void
2482 lpfc_block_mgmt_io(struct lpfc_hba *phba)
2483 {
2484         unsigned long iflag;
2485         uint8_t actcmd = MBX_HEARTBEAT;
2486         unsigned long timeout;
2487
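             /* Assume the default mailbox timeout; it is extended below if
              * a mailbox command is still active.
              */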
2488         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2489         spin_lock_irqsave(&phba->hbalock, iflag);
2490         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2491         if (phba->sli.mbox_active) {
2492                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2493                 /* Determine how long we might wait for the active mailbox
2494                  * command to be gracefully completed by firmware.
2495                  */
2496                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2497                                 phba->sli.mbox_active) * 1000) + jiffies;
2498         }
2499         spin_unlock_irqrestore(&phba->hbalock, iflag);
2500
2501         /* Wait for the outstanding mailbox command to complete */
2502         while (phba->sli.mbox_active) {
2503                 /* Check active mailbox complete status every 2ms */
2504                 msleep(2);
2505                 if (time_after(jiffies, timeout)) {
2506                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2507                                 "2813 Mgmt IO is Blocked %x "
2508                                 "- mbox cmd %x still active\n",
2509                                 phba->sli.sli_flag, actcmd);
2510                         break;
2511                 }
2512         }
2513 }
2514
2515 /**
2516  * lpfc_online - Initialize and bring a HBA online
2517  * @phba: pointer to lpfc hba data structure.
2518  *
2519  * This routine initializes the HBA and brings a HBA online. During this
2520  * process, the management interface is blocked to prevent user space access
2521  * to the HBA interfering with the driver initialization.
2522  *
2523  * Return codes
2524  *   0 - successful
2525  *   1 - failed
2526  **/
2527 int
2528 lpfc_online(struct lpfc_hba *phba)
2529 {
2530         struct lpfc_vport *vport;
2531         struct lpfc_vport **vports;
2532         int i;
2533
2534         if (!phba)
2535                 return 0;
2536         vport = phba->pport;
2537
2538         if (!(vport->fc_flag & FC_OFFLINE_MODE))
2539                 return 0;
2540
2541         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2542                         "0458 Bring Adapter online\n");
2543
2544         lpfc_block_mgmt_io(phba);
2545
2546         if (!lpfc_sli_queue_setup(phba)) {
2547                 lpfc_unblock_mgmt_io(phba);
2548                 return 1;
2549         }
2550
2551         if (phba->sli_rev == LPFC_SLI_REV4) {
2552                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2553                         lpfc_unblock_mgmt_io(phba);
2554                         return 1;
2555                 }
2556         } else {
2557                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2558                         lpfc_unblock_mgmt_io(phba);
2559                         return 1;
2560                 }
2561         }
2562
2563         vports = lpfc_create_vport_work_array(phba);
2564         if (vports != NULL)
2565                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2566                         struct Scsi_Host *shost;
2567                         shost = lpfc_shost_from_vport(vports[i]);
2568                         spin_lock_irq(shost->host_lock);
2569                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2570                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2571                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2572                         if (phba->sli_rev == LPFC_SLI_REV4)
2573                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2574                         spin_unlock_irq(shost->host_lock);
2575                 }
2576         lpfc_destroy_vport_work_array(phba, vports);
2577
2578         lpfc_unblock_mgmt_io(phba);
2579         return 0;
2580 }
2581
2582 /**
2583  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2584  * @phba: pointer to lpfc hba data structure.
2585  *
2586  * This routine marks a HBA's management interface as not blocked. Once the
2587  * HBA's management interface is marked as not blocked, all user space
2588  * access to the HBA, whether through the sysfs interface or the libdfc
2589  * interface, will be allowed. The HBA is set to block the management
2590  * interface when the driver prepares the HBA interface for online or
2591  * offline, and set to unblock the management interface afterwards.
2592  **/
2593 void
2594 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2595 {
2596         unsigned long iflag;
2597
2598         spin_lock_irqsave(&phba->hbalock, iflag);
2599         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2600         spin_unlock_irqrestore(&phba->hbalock, iflag);
2601 }
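
/*
 * Sketch of how a management entry point is expected to honor the flag
 * cleared above.  The sysfs/libdfc paths perform an equivalent check
 * before issuing mailbox commands; lpfc_mgmt_issue_sketch() itself is
 * hypothetical.
 */
static int lpfc_mgmt_issue_sketch(struct lpfc_hba *phba)
{
        if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
                return -EAGAIN; /* HBA is going online/offline; retry later */
        /* ... safe to issue the management mailbox command here ... */
        return 0;
}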
2602
2603 /**
2604  * lpfc_offline_prep - Prepare a HBA to be brought offline
2605  * @phba: pointer to lpfc hba data structure.
2606  *
2607  * This routine is invoked to prepare a HBA to be brought offline. It brings
2608  * the link down, unregisters the RPI login for all nodes on all vports, and
2609  * shuts down the mailbox subsystem to make the HBA ready to go offline.
2610  **/
2611 void
2612 lpfc_offline_prep(struct lpfc_hba *phba)
2613 {
2614         struct lpfc_vport *vport = phba->pport;
2615         struct lpfc_nodelist  *ndlp, *next_ndlp;
2616         struct lpfc_vport **vports;
2617         struct Scsi_Host *shost;
2618         int i;
2619
2620         if (vport->fc_flag & FC_OFFLINE_MODE)
2621                 return;
2622
2623         lpfc_block_mgmt_io(phba);
2624
2625         lpfc_linkdown(phba);
2626
2627         /* Issue an unreg_login to all nodes on all vports */
2628         vports = lpfc_create_vport_work_array(phba);
2629         if (vports != NULL) {
2630                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2631                         if (vports[i]->load_flag & FC_UNLOADING)
2632                                 continue;
2633                         shost = lpfc_shost_from_vport(vports[i]);
2634                         spin_lock_irq(shost->host_lock);
2635                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2636                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2637                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2638                         spin_unlock_irq(shost->host_lock);
2639
2640                         /* Clean up the nodes on this vport's node list */
2641                         list_for_each_entry_safe(ndlp, next_ndlp,
2642                                                  &vports[i]->fc_nodes,
2643                                                  nlp_listp) {
2644                                 if (!NLP_CHK_NODE_ACT(ndlp))
2645                                         continue;
2646                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2647                                         continue;
2648                                 if (ndlp->nlp_type & NLP_FABRIC) {
2649                                         lpfc_disc_state_machine(vports[i], ndlp,
2650                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
2651                                         lpfc_disc_state_machine(vports[i], ndlp,
2652                                                 NULL, NLP_EVT_DEVICE_RM);
2653                                 }
2654                                 spin_lock_irq(shost->host_lock);
2655                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2656                                 spin_unlock_irq(shost->host_lock);
2657                                 lpfc_unreg_rpi(vports[i], ndlp);
2658                         }
2659                 }
2660         }
2661         lpfc_destroy_vport_work_array(phba, vports);
2662
2663         lpfc_sli_mbox_sys_shutdown(phba);
2664 }
2665
2666 /**
2667  * lpfc_offline - Bring a HBA offline
2668  * @phba: pointer to lpfc hba data structure.
2669  *
2670  * This routine actually brings a HBA offline. It stops all the timers
2671  * associated with the HBA, brings down the SLI layer, and eventually
2672  * marks the HBA as in offline state for the upper layer protocol.
2673  **/
2674 void
2675 lpfc_offline(struct lpfc_hba *phba)
2676 {
2677         struct Scsi_Host  *shost;
2678         struct lpfc_vport **vports;
2679         int i;
2680
2681         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2682                 return;
2683
2684         /* stop port and all timers associated with this hba */
2685         lpfc_stop_port(phba);
2686         vports = lpfc_create_vport_work_array(phba);
2687         if (vports != NULL)
2688                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2689                         lpfc_stop_vport_timers(vports[i]);
2690         lpfc_destroy_vport_work_array(phba, vports);
2691         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2692                         "0460 Bring Adapter offline\n");
2693         /* Bring down the SLI Layer and cleanup.  The HBA is offline
2694          * now. */
2695         lpfc_sli_hba_down(phba);
2696         spin_lock_irq(&phba->hbalock);
2697         phba->work_ha = 0;
2698         spin_unlock_irq(&phba->hbalock);
2699         vports = lpfc_create_vport_work_array(phba);
2700         if (vports != NULL)
2701                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2702                         shost = lpfc_shost_from_vport(vports[i]);
2703                         spin_lock_irq(shost->host_lock);
2704                         vports[i]->work_port_events = 0;
2705                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
2706                         spin_unlock_irq(shost->host_lock);
2707                 }
2708         lpfc_destroy_vport_work_array(phba, vports);
2709 }
2710
2711 /**
2712  * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
2713  * @phba: pointer to lpfc hba data structure.
2714  *
2715  * This routine goes through all the SCSI buffers in the system and updates
2716  * the physical XRI assigned to each SCSI buffer, because these assignments
2717  * may change after any firmware reset.
2718  *
2719  * Return codes
2720  *   0 - successful (for now, it always returns 0)
2721  **/
2722 int
2723 lpfc_scsi_buf_update(struct lpfc_hba *phba)
2724 {
2725         struct lpfc_scsi_buf *sb, *sb_next;
2726
2727         spin_lock_irq(&phba->hbalock);
2728         spin_lock(&phba->scsi_buf_list_lock);
2729         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
2730                 sb->cur_iocbq.sli4_xritag =
2731                         phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2732         spin_unlock(&phba->scsi_buf_list_lock);
2733         spin_unlock_irq(&phba->hbalock);
2734         return 0;
2735 }
2736
2737 /**
2738  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2739  * @phba: pointer to lpfc hba data structure.
2740  *
2741  * This routine frees all the SCSI buffers and IOCBs on the driver lists
2742  * back to the kernel. It is called from lpfc_pci_remove_one to free the
2743  * internal resources before the device is removed from the system.
2744  *
2745  * Return codes
2746  *   0 - successful (for now, it always returns 0)
2747  **/
2748 static int
2749 lpfc_scsi_free(struct lpfc_hba *phba)
2750 {
2751         struct lpfc_scsi_buf *sb, *sb_next;
2752         struct lpfc_iocbq *io, *io_next;
2753
2754         spin_lock_irq(&phba->hbalock);
2755         /* Release all the lpfc_scsi_bufs maintained by this host. */
2756         spin_lock(&phba->scsi_buf_list_lock);
2757         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2758                 list_del(&sb->list);
2759                 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2760                               sb->dma_handle);
2761                 kfree(sb);
2762                 phba->total_scsi_bufs--;
2763         }
2764         spin_unlock(&phba->scsi_buf_list_lock);
2765
2766         /* Release all the lpfc_iocbq entries maintained by this host. */
2767         list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2768                 list_del(&io->list);
2769                 kfree(io);
2770                 phba->total_iocbq_bufs--;
2771         }
2772
2773         spin_unlock_irq(&phba->hbalock);
2774         return 0;
2775 }
2776
2777 /**
2778  * lpfc_create_port - Create an FC port
2779  * @phba: pointer to lpfc hba data structure.
2780  * @instance: a unique integer ID to this FC port.
2781  * @dev: pointer to the device data structure.
2782  *
2783  * This routine creates an FC port for the upper layer protocol. The FC port
2784  * can be created on top of either a physical port or a virtual port provided
2785  * by the HBA. This routine also allocates a SCSI host data structure (shost)
2786  * and associates it with the FC port before adding the shost to the SCSI
2787  * layer.
2788  *
2789  * Return codes
2790  *   @vport - pointer to the virtual N_Port data structure.
2791  *   NULL - port create failed.
2792  **/
2793 struct lpfc_vport *
2794 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2795 {
2796         struct lpfc_vport *vport;
2797         struct Scsi_Host  *shost;
2798         int error = 0;
2799
2800         if (dev != &phba->pcidev->dev)
2801                 shost = scsi_host_alloc(&lpfc_vport_template,
2802                                         sizeof(struct lpfc_vport));
2803         else
2804                 shost = scsi_host_alloc(&lpfc_template,
2805                                         sizeof(struct lpfc_vport));
2806         if (!shost)
2807                 goto out;
2808
2809         vport = (struct lpfc_vport *) shost->hostdata;
2810         vport->phba = phba;
2811         vport->load_flag |= FC_LOADING;
2812         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2813         vport->fc_rscn_flush = 0;
2814
2815         lpfc_get_vport_cfgparam(vport);
2816         shost->unique_id = instance;
2817         shost->max_id = LPFC_MAX_TARGET;
2818         shost->max_lun = vport->cfg_max_luns;
2819         shost->this_id = -1;
2820         shost->max_cmd_len = 16;
2821         if (phba->sli_rev == LPFC_SLI_REV4) {
2822                 shost->dma_boundary =
2823                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2824                 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2825         }
2826
2827         /*
2828          * Set initial can_queue value since 0 is no longer supported and
2829          * scsi_add_host will fail. This will be adjusted later based on the
2830          * max xri value determined in hba setup.
2831          */
2832         shost->can_queue = phba->cfg_hba_queue_depth - 10;
2833         if (dev != &phba->pcidev->dev) {
2834                 shost->transportt = lpfc_vport_transport_template;
2835                 vport->port_type = LPFC_NPIV_PORT;
2836         } else {
2837                 shost->transportt = lpfc_transport_template;
2838                 vport->port_type = LPFC_PHYSICAL_PORT;
2839         }
2840
2841         /* Initialize all internally managed lists. */
2842         INIT_LIST_HEAD(&vport->fc_nodes);
2843         INIT_LIST_HEAD(&vport->rcv_buffer_list);
2844         spin_lock_init(&vport->work_port_lock);
2845
2846         init_timer(&vport->fc_disctmo);
2847         vport->fc_disctmo.function = lpfc_disc_timeout;
2848         vport->fc_disctmo.data = (unsigned long)vport;
2849
2850         init_timer(&vport->fc_fdmitmo);
2851         vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2852         vport->fc_fdmitmo.data = (unsigned long)vport;
2853
2854         init_timer(&vport->els_tmofunc);
2855         vport->els_tmofunc.function = lpfc_els_timeout;
2856         vport->els_tmofunc.data = (unsigned long)vport;
2857
2858         init_timer(&vport->delayed_disc_tmo);
2859         vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2860         vport->delayed_disc_tmo.data = (unsigned long)vport;
2861
2862         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2863         if (error)
2864                 goto out_put_shost;
2865
2866         spin_lock_irq(&phba->hbalock);
2867         list_add_tail(&vport->listentry, &phba->port_list);
2868         spin_unlock_irq(&phba->hbalock);
2869         return vport;
2870
2871 out_put_shost:
2872         scsi_host_put(shost);
2873 out:
2874         return NULL;
2875 }
2876
2877 /**
2878  * destroy_port -  destroy an FC port
2879  * @vport: pointer to an lpfc virtual N_Port data structure.
2880  *
2881  * This routine destroys an FC port from the upper layer protocol. All the
2882  * resources associated with the port are released.
2883  **/
2884 void
2885 destroy_port(struct lpfc_vport *vport)
2886 {
2887         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2888         struct lpfc_hba  *phba = vport->phba;
2889
2890         lpfc_debugfs_terminate(vport);
2891         fc_remove_host(shost);
2892         scsi_remove_host(shost);
2893
2894         spin_lock_irq(&phba->hbalock);
2895         list_del_init(&vport->listentry);
2896         spin_unlock_irq(&phba->hbalock);
2897
2898         lpfc_cleanup(vport);
2899         return;
2900 }
2901
2902 /**
2903  * lpfc_get_instance - Get a unique integer ID
2904  *
2905  * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
2906  * uses the kernel idr facility to perform the task.
2907  *
2908  * Return codes:
2909  *   instance - a unique integer ID allocated as the new instance.
2910  *   -1 - lpfc get instance failed.
2911  **/
2912 int
2913 lpfc_get_instance(void)
2914 {
2915         int instance = 0;
2916
2917         /* Assign an unused number */
2918         if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2919                 return -1;
2920         if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2921                 return -1;
2922         return instance;
2923 }
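
/*
 * Minimal sketch of the matching release, assuming the teardown path
 * returns the instance number to the lpfc_hba_index idr with
 * idr_remove().  lpfc_put_instance() is a hypothetical wrapper shown
 * only to illustrate the pairing.
 */
static void lpfc_put_instance(int instance)
{
        if (instance >= 0)
                idr_remove(&lpfc_hba_index, instance);
}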
2924
2925 /**
2926  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2927  * @shost: pointer to SCSI host data structure.
2928  * @time: elapsed time of the scan in jiffies.
2929  *
2930  * This routine is called by the SCSI layer with a SCSI host to determine
2931  * whether the host scan is finished.
2932  *
2933  * Note: there is no scan_start function as adapter initialization will have
2934  * asynchronously kicked off the link initialization.
2935  *
2936  * Return codes
2937  *   0 - SCSI host scan is not over yet.
2938  *   1 - SCSI host scan is over.
2939  **/
2940 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2941 {
2942         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2943         struct lpfc_hba   *phba = vport->phba;
2944         int stat = 0;
2945
2946         spin_lock_irq(shost->host_lock);
2947
2948         if (vport->load_flag & FC_UNLOADING) {
2949                 stat = 1;
2950                 goto finished;
2951         }
2952         if (time >= 30 * HZ) {
2953                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2954                                 "0461 Scanning longer than 30 "
2955                                 "seconds.  Continuing initialization\n");
2956                 stat = 1;
2957                 goto finished;
2958         }
2959         if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2960                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2961                                 "0465 Link down longer than 15 "
2962                                 "seconds.  Continuing initialization\n");
2963                 stat = 1;
2964                 goto finished;
2965         }
2966
2967         if (vport->port_state != LPFC_VPORT_READY)
2968                 goto finished;
2969         if (vport->num_disc_nodes || vport->fc_prli_sent)
2970                 goto finished;
2971         if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2972                 goto finished;
2973         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2974                 goto finished;
2975
2976         stat = 1;
2977
2978 finished:
2979         spin_unlock_irq(shost->host_lock);
2980         return stat;
2981 }
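
/*
 * Sketch of how this hook is consumed: the routine is wired into the
 * driver's scsi_host_template (see lpfc_scsi.c) and the SCSI midlayer
 * polls it during scsi_scan_host() until it returns 1.  The template
 * below is illustrative only, with the unrelated fields elided.
 */
static struct scsi_host_template lpfc_template_sketch = {
        .module         = THIS_MODULE,
        .name           = "lpfc-sketch",
        .scan_finished  = lpfc_scan_finished,
        /* ... remaining lpfc template fields ... */
};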
2982
2983 /**
2984  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2985  * @shost: pointer to SCSI host data structure.
2986  *
2987  * This routine initializes a given SCSI host attributes on a FC port. The
2988  * SCSI host can be either on top of a physical port or a virtual port.
2989  **/
2990 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2991 {
2992         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2993         struct lpfc_hba   *phba = vport->phba;
2994         /*
2995          * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2996          */
2997
2998         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2999         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3000         fc_host_supported_classes(shost) = FC_COS_CLASS3;
3001
3002         memset(fc_host_supported_fc4s(shost), 0,
3003                sizeof(fc_host_supported_fc4s(shost)));
3004         fc_host_supported_fc4s(shost)[2] = 1;
3005         fc_host_supported_fc4s(shost)[7] = 1;
3006
3007         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3008                                  sizeof(fc_host_symbolic_name(shost)));
3009
3010         fc_host_supported_speeds(shost) = 0;
3011         if (phba->lmt & LMT_16Gb)
3012                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3013         if (phba->lmt & LMT_10Gb)
3014                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3015         if (phba->lmt & LMT_8Gb)
3016                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3017         if (phba->lmt & LMT_4Gb)
3018                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3019         if (phba->lmt & LMT_2Gb)
3020                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3021         if (phba->lmt & LMT_1Gb)
3022                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3023
3024         fc_host_maxframe_size(shost) =
3025                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3026                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3027
3028         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3029
3030         /* This value is also unchanging */
3031         memset(fc_host_active_fc4s(shost), 0,
3032                sizeof(fc_host_active_fc4s(shost)));
3033         fc_host_active_fc4s(shost)[2] = 1;
3034         fc_host_active_fc4s(shost)[7] = 1;
3035
3036         fc_host_max_npiv_vports(shost) = phba->max_vpi;
3037         spin_lock_irq(shost->host_lock);
3038         vport->load_flag &= ~FC_LOADING;
3039         spin_unlock_irq(shost->host_lock);
3040 }
3041
3042 /**
3043  * lpfc_stop_port_s3 - Stop SLI3 device port
3044  * @phba: pointer to lpfc hba data structure.
3045  *
3046  * This routine is invoked to stop an SLI3 device port. It stops the device
3047  * from generating interrupts and stops the device driver's timers for the
3048  * device.
3049  **/
3050 static void
3051 lpfc_stop_port_s3(struct lpfc_hba *phba)
3052 {
3053         /* Clear all interrupt enable conditions */
3054         writel(0, phba->HCregaddr);
3055         readl(phba->HCregaddr); /* flush */
3056         /* Clear all pending interrupts */
3057         writel(0xffffffff, phba->HAregaddr);
3058         readl(phba->HAregaddr); /* flush */
3059
3060         /* Reset some HBA SLI setup states */
3061         lpfc_stop_hba_timers(phba);
3062         phba->pport->work_port_events = 0;
3063 }
3064
3065 /**
3066  * lpfc_stop_port_s4 - Stop SLI4 device port
3067  * @phba: pointer to lpfc hba data structure.
3068  *
3069  * This routine is invoked to stop an SLI4 device port. It stops the device
3070  * from generating interrupts and stops the device driver's timers for the
3071  * device.
3072  **/
3073 static void
3074 lpfc_stop_port_s4(struct lpfc_hba *phba)
3075 {
3076         /* Reset some HBA SLI4 setup states */
3077         lpfc_stop_hba_timers(phba);
3078         phba->pport->work_port_events = 0;
3079         phba->sli4_hba.intr_enable = 0;
3080 }
3081
3082 /**
3083  * lpfc_stop_port - Wrapper function for stopping hba port
3084  * @phba: Pointer to HBA context object.
3085  *
3086  * This routine wraps the actual SLI3 or SLI4 HBA stop-port routine, invoked
3087  * through the API jump table function pointer in the lpfc_hba struct.
3088  **/
3089 void
3090 lpfc_stop_port(struct lpfc_hba *phba)
3091 {
3092         phba->lpfc_stop_port(phba);
3093 }
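
/*
 * Sketch of how the jump table entry invoked above is populated.  The
 * per-device-group API setup (see lpfc_init_api_table_setup() later in
 * this file) selects the handler once at init time, roughly as below;
 * lpfc_stop_port_select_sketch() is hypothetical.
 */
static void lpfc_stop_port_select_sketch(struct lpfc_hba *phba,
                                         uint8_t dev_grp)
{
        if (dev_grp == LPFC_PCI_DEV_OC)         /* SLI4 device group */
                phba->lpfc_stop_port = lpfc_stop_port_s4;
        else                                    /* LPFC_PCI_DEV_LP, SLI3 */
                phba->lpfc_stop_port = lpfc_stop_port_s3;
}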
3094
3095 /**
3096  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3097  * @phba: Pointer to hba for which this call is being executed.
3098  *
3099  * This routine starts the timer waiting for the FCF rediscovery to complete.
3100  **/
3101 void
3102 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3103 {
3104         unsigned long fcf_redisc_wait_tmo =
3105                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3106         /* Start fcf rediscovery wait period timer */
3107         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3108         spin_lock_irq(&phba->hbalock);
3109         /* Allow action on new FCF asynchronous events */
3110         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3111         /* Mark the FCF rediscovery pending state */
3112         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3113         spin_unlock_irq(&phba->hbalock);
3114 }
3115
3116 /**
3117  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3118  * @ptr: pointer to the lpfc_hba data structure, cast to unsigned long.
3119  *
3120  * This routine is invoked when the wait for FCF table rediscovery has
3121  * timed out. If new FCF records have been discovered during the wait
3122  * period, a new FCF event is added to the FCoE async event list, and the
3123  * worker thread is woken up to process the event from the worker
3124  * thread context.
3125  **/
3126 void
3127 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3128 {
3129         struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3130
3131         /* Don't send FCF rediscovery event if timer cancelled */
3132         spin_lock_irq(&phba->hbalock);
3133         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3134                 spin_unlock_irq(&phba->hbalock);
3135                 return;
3136         }
3137         /* Clear FCF rediscovery timer pending flag */
3138         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3139         /* FCF rediscovery event to worker thread */
3140         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3141         spin_unlock_irq(&phba->hbalock);
3142         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3143                         "2776 FCF rediscover quiescent timer expired\n");
3144         /* wake up worker thread */
3145         lpfc_worker_wake_up(phba);
3146 }
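
/*
 * Sketch of the timer wiring that delivers the callback above.  The
 * SLI4 resource setup path initializes phba->fcf.redisc_wait with the
 * same init_timer() pattern used in lpfc_create_port() earlier;
 * lpfc_fcf_redisc_timer_wire_sketch() is hypothetical.
 */
static void lpfc_fcf_redisc_timer_wire_sketch(struct lpfc_hba *phba)
{
        init_timer(&phba->fcf.redisc_wait);
        phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
        phba->fcf.redisc_wait.data = (unsigned long)phba;
}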
3147
3148 /**
3149  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3150  * @phba: pointer to lpfc hba data structure.
3151  * @acqe_link: pointer to the async link completion queue entry.
3152  *
3153  * This routine is to parse the SLI4 link-attention link fault code and
3154  * translate it into the base driver's read link attention mailbox command
3155  * status.
3156  *
3157  * Return: Link-attention status in terms of base driver's coding.
3158  **/
3159 static uint16_t
3160 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3161                            struct lpfc_acqe_link *acqe_link)
3162 {
3163         uint16_t latt_fault;
3164
3165         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3166         case LPFC_ASYNC_LINK_FAULT_NONE:
3167         case LPFC_ASYNC_LINK_FAULT_LOCAL:
3168         case LPFC_ASYNC_LINK_FAULT_REMOTE:
3169                 latt_fault = 0;
3170                 break;
3171         default:
3172                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3173                                 "0398 Invalid link fault code: x%x\n",
3174                                 bf_get(lpfc_acqe_link_fault, acqe_link));
3175                 latt_fault = MBXERR_ERROR;
3176                 break;
3177         }
3178         return latt_fault;
3179 }
3180
3181 /**
3182  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3183  * @phba: pointer to lpfc hba data structure.
3184  * @acqe_link: pointer to the async link completion queue entry.
3185  *
3186  * This routine is to parse the SLI4 link attention type and translate it
3187  * into the base driver's link attention type coding.
3188  *
3189  * Return: Link attention type in terms of base driver's coding.
3190  **/
3191 static uint8_t
3192 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3193                           struct lpfc_acqe_link *acqe_link)
3194 {
3195         uint8_t att_type;
3196
3197         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3198         case LPFC_ASYNC_LINK_STATUS_DOWN:
3199         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3200                 att_type = LPFC_ATT_LINK_DOWN;
3201                 break;
3202         case LPFC_ASYNC_LINK_STATUS_UP:
3203                 /* Ignore physical link up events - wait for logical link up */
3204                 att_type = LPFC_ATT_RESERVED;
3205                 break;
3206         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3207                 att_type = LPFC_ATT_LINK_UP;
3208                 break;
3209         default:
3210                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3211                                 "0399 Invalid link attention type: x%x\n",
3212                                 bf_get(lpfc_acqe_link_status, acqe_link));
3213                 att_type = LPFC_ATT_RESERVED;
3214                 break;
3215         }
3216         return att_type;
3217 }
3218
3219 /**
3220  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3221  * @phba: pointer to lpfc hba data structure.
3222  * @acqe_link: pointer to the async link completion queue entry.
3223  *
3224  * This routine is to parse the SLI4 link-attention link speed and translate
3225  * it into the base driver's link-attention link speed coding.
3226  *
3227  * Return: Link-attention link speed in terms of base driver's coding.
3228  **/
3229 static uint8_t
3230 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3231                                 struct lpfc_acqe_link *acqe_link)
3232 {
3233         uint8_t link_speed;
3234
3235         switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3236         case LPFC_ASYNC_LINK_SPEED_ZERO:
3237         case LPFC_ASYNC_LINK_SPEED_10MBPS:
3238         case LPFC_ASYNC_LINK_SPEED_100MBPS:
3239                 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3240                 break;
3241         case LPFC_ASYNC_LINK_SPEED_1GBPS:
3242                 link_speed = LPFC_LINK_SPEED_1GHZ;
3243                 break;
3244         case LPFC_ASYNC_LINK_SPEED_10GBPS:
3245                 link_speed = LPFC_LINK_SPEED_10GHZ;
3246                 break;
3247         default:
3248                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3249                                 "0483 Invalid link-attention link speed: x%x\n",
3250                                 bf_get(lpfc_acqe_link_speed, acqe_link));
3251                 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3252                 break;
3253         }
3254         return link_speed;
3255 }
3256
3257 /**
3258  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3259  * @phba: pointer to lpfc hba data structure.
3260  * @acqe_link: pointer to the async link completion queue entry.
3261  *
3262  * This routine is to handle the SLI4 asynchronous FCoE link event.
3263  **/
3264 static void
3265 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3266                          struct lpfc_acqe_link *acqe_link)
3267 {
3268         struct lpfc_dmabuf *mp;
3269         LPFC_MBOXQ_t *pmb;
3270         MAILBOX_t *mb;
3271         struct lpfc_mbx_read_top *la;
3272         uint8_t att_type;
3273         int rc;
3274
3275         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3276         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3277                 return;
3278         phba->fcoe_eventtag = acqe_link->event_tag;
3279         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3280         if (!pmb) {
3281                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3282                                 "0395 The mboxq allocation failed\n");
3283                 return;
3284         }
3285         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3286         if (!mp) {
3287                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3288                                 "0396 The lpfc_dmabuf allocation failed\n");
3289                 goto out_free_pmb;
3290         }
3291         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3292         if (!mp->virt) {
3293                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3294                                 "0397 The mbuf allocation failed\n");
3295                 goto out_free_dmabuf;
3296         }
3297
3298         /* Cleanup any outstanding ELS commands */
3299         lpfc_els_flush_all_cmd(phba);
3300
3301         /* Block ELS IOCBs until we have processed the link event */
3302         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3303
3304         /* Update link event statistics */
3305         phba->sli.slistat.link_event++;
3306
3307         /* Create lpfc_handle_latt mailbox command from link ACQE */
3308         lpfc_read_topology(phba, pmb, mp);
3309         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3310         pmb->vport = phba->pport;
3311
3312         /* Keep the link status for extra SLI4 state machine reference */
3313         phba->sli4_hba.link_state.speed =
3314                                 bf_get(lpfc_acqe_link_speed, acqe_link);
3315         phba->sli4_hba.link_state.duplex =
3316                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
3317         phba->sli4_hba.link_state.status =
3318                                 bf_get(lpfc_acqe_link_status, acqe_link);
3319         phba->sli4_hba.link_state.type =
3320                                 bf_get(lpfc_acqe_link_type, acqe_link);
3321         phba->sli4_hba.link_state.number =
3322                                 bf_get(lpfc_acqe_link_number, acqe_link);
3323         phba->sli4_hba.link_state.fault =
3324                                 bf_get(lpfc_acqe_link_fault, acqe_link);
3325         phba->sli4_hba.link_state.logical_speed =
3326                         bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3327         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3328                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
3329                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3330                         "Logical speed:%dMbps Fault:%d\n",
3331                         phba->sli4_hba.link_state.speed,
3332                         phba->sli4_hba.link_state.duplex,
3333                         phba->sli4_hba.link_state.status,
3334                         phba->sli4_hba.link_state.type,
3335                         phba->sli4_hba.link_state.number,
3336                         phba->sli4_hba.link_state.logical_speed * 10,
3337                         phba->sli4_hba.link_state.fault);
3338         /*
3339          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3340          * topology info. Note: Optional for non FC-AL ports.
3341          */
3342         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3343                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3344                 if (rc == MBX_NOT_FINISHED)
3345                         goto out_free_dmabuf;
3346                 return;
3347         }
3348         /*
3349          * For FCoE Mode: fill in all the topology information we need and call
3350          * the READ_TOPOLOGY completion routine to continue without actually
3351          * sending the READ_TOPOLOGY mailbox command to the port.
3352          */
3353         /* Parse and translate status field */
3354         mb = &pmb->u.mb;
3355         mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3356
3357         /* Parse and translate link attention fields */
3358         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3359         la->eventTag = acqe_link->event_tag;
3360         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3361         bf_set(lpfc_mbx_read_top_link_spd, la,
3362                lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3363
3364         /* Fake the following irrelevant fields */
3365         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3366         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3367         bf_set(lpfc_mbx_read_top_il, la, 0);
3368         bf_set(lpfc_mbx_read_top_pb, la, 0);
3369         bf_set(lpfc_mbx_read_top_fa, la, 0);
3370         bf_set(lpfc_mbx_read_top_mm, la, 0);
3371
3372         /* Invoke the lpfc_handle_latt mailbox command callback function */
3373         lpfc_mbx_cmpl_read_topology(phba, pmb);
3374
3375         return;
3376
3377 out_free_dmabuf:
3378         kfree(mp);
3379 out_free_pmb:
3380         mempool_free(pmb, phba->mbox_mem_pool);
3381 }
3382
3383 /**
3384  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3385  * @phba: pointer to lpfc hba data structure.
3386  * @acqe_fc: pointer to the async fc completion queue entry.
3387  *
3388  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3389  * that the event was received and then issue a read_topology mailbox command so
3390  * that the rest of the driver will treat it the same as SLI3.
3391  **/
3392 static void
3393 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3394 {
3395         struct lpfc_dmabuf *mp;
3396         LPFC_MBOXQ_t *pmb;
3397         int rc;
3398
3399         if (bf_get(lpfc_trailer_type, acqe_fc) !=
3400             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3401                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3402                                 "2895 Non-FC link event detected (%d)\n",
3403                                 bf_get(lpfc_trailer_type, acqe_fc));
3404                 return;
3405         }
3406         /* Keep the link status for extra SLI4 state machine reference */
3407         phba->sli4_hba.link_state.speed =
3408                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3409         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3410         phba->sli4_hba.link_state.topology =
3411                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3412         phba->sli4_hba.link_state.status =
3413                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3414         phba->sli4_hba.link_state.type =
3415                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3416         phba->sli4_hba.link_state.number =
3417                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3418         phba->sli4_hba.link_state.fault =
3419                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
3420         phba->sli4_hba.link_state.logical_speed =
3421                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3422         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3423                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3424                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3425                         "%dMbps Fault:%d\n",
3426                         phba->sli4_hba.link_state.speed,
3427                         phba->sli4_hba.link_state.topology,
3428                         phba->sli4_hba.link_state.status,
3429                         phba->sli4_hba.link_state.type,
3430                         phba->sli4_hba.link_state.number,
3431                         phba->sli4_hba.link_state.logical_speed * 10,
3432                         phba->sli4_hba.link_state.fault);
3433         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3434         if (!pmb) {
3435                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3436                                 "2897 The mboxq allocation failed\n");
3437                 return;
3438         }
3439         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3440         if (!mp) {
3441                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3442                                 "2898 The lpfc_dmabuf allocation failed\n");
3443                 goto out_free_pmb;
3444         }
3445         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3446         if (!mp->virt) {
3447                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3448                                 "2899 The mbuf allocation failed\n");
3449                 goto out_free_dmabuf;
3450         }
3451
3452         /* Cleanup any outstanding ELS commands */
3453         lpfc_els_flush_all_cmd(phba);
3454
3455         /* Block ELS IOCBs until we have processed the link event */
3456         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3457
3458         /* Update link event statistics */
3459         phba->sli.slistat.link_event++;
3460
3461         /* Create lpfc_handle_latt mailbox command from link ACQE */
3462         lpfc_read_topology(phba, pmb, mp);
3463         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3464         pmb->vport = phba->pport;
3465
3466         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3467         if (rc == MBX_NOT_FINISHED)
3468                 goto out_free_dmabuf;
3469         return;
3470
3471 out_free_dmabuf:
3472         kfree(mp);
3473 out_free_pmb:
3474         mempool_free(pmb, phba->mbox_mem_pool);
3475 }
3476
3477 /**
3478  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3479  * @phba: pointer to lpfc hba data structure.
3480  * @acqe_sli: pointer to the async SLI completion queue entry.
3481  *
3482  * This routine is to handle the SLI4 asynchronous SLI events.
3483  **/
3484 static void
3485 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3486 {
3487         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3488                         "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3489                         "x%08x SLI Event Type:%d\n",
3490                         acqe_sli->event_data1, acqe_sli->event_data2,
3491                         bf_get(lpfc_trailer_type, acqe_sli));
3492         return;
3493 }
3494
3495 /**
3496  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3497  * @vport: pointer to vport data structure.
3498  *
3499  * This routine is to perform Clear Virtual Link (CVL) on a vport in
3500  * response to a CVL event.
3501  *
3502  * Return the pointer to the ndlp with the vport if successful, otherwise
3503  * return NULL.
3504  **/
3505 static struct lpfc_nodelist *
3506 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3507 {
3508         struct lpfc_nodelist *ndlp;
3509         struct Scsi_Host *shost;
3510         struct lpfc_hba *phba;
3511
3512         if (!vport)
3513                 return NULL;
3514         phba = vport->phba;
3515         if (!phba)
3516                 return NULL;
3517         ndlp = lpfc_findnode_did(vport, Fabric_DID);
3518         if (!ndlp) {
3519                 /* Cannot find existing Fabric ndlp, so allocate a new one */
3520                 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3521                 if (!ndlp)
3522                         return NULL;
3523                 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3524                 /* Set the node type */
3525                 ndlp->nlp_type |= NLP_FABRIC;
3526                 /* Put ndlp onto node list */
3527                 lpfc_enqueue_node(vport, ndlp);
3528         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3529                 /* re-setup ndlp without removing from node list */
3530                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3531                 if (!ndlp)
3532                         return NULL;
3533         }
3534         if ((phba->pport->port_state < LPFC_FLOGI) &&
3535                 (phba->pport->port_state != LPFC_VPORT_FAILED))
3536                 return NULL;
3537         /* If virtual link is not yet instantiated ignore CVL */
3538         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3539                 && (vport->port_state != LPFC_VPORT_FAILED))
3540                 return NULL;
3541         shost = lpfc_shost_from_vport(vport);
3542         if (!shost)
3543                 return NULL;
3544         lpfc_linkdown_port(vport);
3545         lpfc_cleanup_pending_mbox(vport);
3546         spin_lock_irq(shost->host_lock);
3547         vport->fc_flag |= FC_VPORT_CVL_RCVD;
3548         spin_unlock_irq(shost->host_lock);
3549
3550         return ndlp;
3551 }
3552
3553 /**
3554  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3555  * @phba: pointer to lpfc hba data structure.
3556  *
3557  * This routine is to perform Clear Virtual Link (CVL) on all vports in
3558  * response to a FCF dead event.
3559  **/
3560 static void
3561 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3562 {
3563         struct lpfc_vport **vports;
3564         int i;
3565
3566         vports = lpfc_create_vport_work_array(phba);
3567         if (vports)
3568                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3569                         lpfc_sli4_perform_vport_cvl(vports[i]);
3570         lpfc_destroy_vport_work_array(phba, vports);
3571 }
3572
3573 /**
3574  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3575  * @phba: pointer to lpfc hba data structure.
3576  * @acqe_fip: pointer to the async FIP completion queue entry.
3577  *
3578  * This routine is to handle the SLI4 asynchronous FCoE FIP event.
3579  **/
3580 static void
3581 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3582                         struct lpfc_acqe_fip *acqe_fip)
3583 {
3584         uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3585         int rc;
3586         struct lpfc_vport *vport;
3587         struct lpfc_nodelist *ndlp;
3588         struct Scsi_Host  *shost;
3589         int active_vlink_present;
3590         struct lpfc_vport **vports;
3591         int i;
3592
3593         phba->fc_eventTag = acqe_fip->event_tag;
3594         phba->fcoe_eventtag = acqe_fip->event_tag;
3595         switch (event_type) {
3596         case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3597         case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3598                 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3599                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3600                                         LOG_DISCOVERY,
3601                                         "2546 New FCF event, evt_tag:x%x, "
3602                                         "index:x%x\n",
3603                                         acqe_fip->event_tag,
3604                                         acqe_fip->index);
3605                 else
3606                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3607                                         LOG_DISCOVERY,
3608                                         "2788 FCF param modified event, "
3609                                         "evt_tag:x%x, index:x%x\n",
3610                                         acqe_fip->event_tag,
3611                                         acqe_fip->index);
3612                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3613                         /*
3614                          * During period of FCF discovery, read the FCF
3615                          * table record indexed by the event to update
3616                          * FCF roundrobin failover eligible FCF bmask.
3617                          */
3618                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3619                                         LOG_DISCOVERY,
3620                                         "2779 Read FCF (x%x) for updating "
3621                                         "roundrobin FCF failover bmask\n",
3622                                         acqe_fip->index);
3623                         rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3624                 }
3625
3626                 /* If the FCF discovery is in progress, do nothing. */
3627                 spin_lock_irq(&phba->hbalock);
3628                 if (phba->hba_flag & FCF_TS_INPROG) {
3629                         spin_unlock_irq(&phba->hbalock);
3630                         break;
3631                 }
3632                 /* If fast FCF failover rescan event is pending, do nothing */
3633                 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3634                         spin_unlock_irq(&phba->hbalock);
3635                         break;
3636                 }
3637
3638                 /* If the FCF is already in the discovered state, do nothing. */
3639                 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3640                         spin_unlock_irq(&phba->hbalock);
3641                         break;
3642                 }
3643                 spin_unlock_irq(&phba->hbalock);
3644
3645                 /* Otherwise, scan the entire FCF table and re-discover SAN */
3646                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3647                                 "2770 Start FCF table scan per async FCF "
3648                                 "event, evt_tag:x%x, index:x%x\n",
3649                                 acqe_fip->event_tag, acqe_fip->index);
3650                 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3651                                                      LPFC_FCOE_FCF_GET_FIRST);
3652                 if (rc)
3653                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3654                                         "2547 Issue FCF scan read FCF mailbox "
3655                                         "command failed (x%x)\n", rc);
3656                 break;
3657
3658         case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3659                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3660                         "2548 FCF Table full count 0x%x tag 0x%x\n",
3661                         bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3662                         acqe_fip->event_tag);
3663                 break;
3664
3665         case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3666                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3667                         "2549 FCF (x%x) disconnected from network, "
3668                         "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3669                 /*
3670                  * If we are in the middle of FCF failover process, clear
3671                  * the corresponding FCF bit in the roundrobin bitmap.
3672                  */
3673                 spin_lock_irq(&phba->hbalock);
3674                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3675                         spin_unlock_irq(&phba->hbalock);
3676                         /* Update FLOGI FCF failover eligible FCF bmask */
3677                         lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3678                         break;
3679                 }
3680                 spin_unlock_irq(&phba->hbalock);
3681
3682                 /* If the event is not for the currently used FCF, do nothing */
3683                 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3684                         break;
3685
3686                 /*
3687                  * Otherwise, request the port to rediscover the entire FCF
3688                  * table for a fast recovery in case the current FCF is no
3689                  * longer valid, since we are not already in the middle of
3690                  * the FCF failover process.
3691                  */
3692                 spin_lock_irq(&phba->hbalock);
3693                 /* Mark the fast failover process in progress */
3694                 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3695                 spin_unlock_irq(&phba->hbalock);
3696
3697                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3698                                 "2771 Start FCF fast failover process due to "
3699                                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3700                                 "\n", acqe_fip->event_tag, acqe_fip->index);
3701                 rc = lpfc_sli4_redisc_fcf_table(phba);
3702                 if (rc) {
3703                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3704                                         LOG_DISCOVERY,
3705                                         "2772 Issue FCF rediscover mailbox "
3706                                         "command failed, fail through to FCF "
3707                                         "dead event\n");
3708                         spin_lock_irq(&phba->hbalock);
3709                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3710                         spin_unlock_irq(&phba->hbalock);
3711                         /*
3712                          * Last resort will fail over by treating this
3713                          * as a link down to FCF registration.
3714                          */
3715                         lpfc_sli4_fcf_dead_failthrough(phba);
3716                 } else {
3717                         /* Reset FCF roundrobin bmask for new discovery */
3718                         lpfc_sli4_clear_fcf_rr_bmask(phba);
3719                         /*
3720                          * Handling fast FCF failover to a DEAD FCF event is
3721                          * considered equivalent to receiving CVL on all vports.
3722                          */
3723                         lpfc_sli4_perform_all_vport_cvl(phba);
3724                 }
3725                 break;
3726         case LPFC_FIP_EVENT_TYPE_CVL:
3727                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3728                         "2718 Clear Virtual Link Received for VPI 0x%x"
3729                         " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3730
3731                 vport = lpfc_find_vport_by_vpid(phba,
3732                                                 acqe_fip->index);
3733                 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3734                 if (!ndlp)
3735                         break;
3736                 active_vlink_present = 0;
3737
3738                 vports = lpfc_create_vport_work_array(phba);
3739                 if (vports) {
3740                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3741                                         i++) {
3742                                 if ((!(vports[i]->fc_flag &
3743                                         FC_VPORT_CVL_RCVD)) &&
3744                                         (vports[i]->port_state > LPFC_FDISC)) {
3745                                         active_vlink_present = 1;
3746                                         break;
3747                                 }
3748                         }
3749                         lpfc_destroy_vport_work_array(phba, vports);
3750                 }
3751
3752                 if (active_vlink_present) {
3753                         /*
3754                          * If there are other active VLinks present,
3755                          * re-instantiate the Vlink using FDISC.
3756                          */
3757                         mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3758                         shost = lpfc_shost_from_vport(vport);
3759                         spin_lock_irq(shost->host_lock);
3760                         ndlp->nlp_flag |= NLP_DELAY_TMO;
3761                         spin_unlock_irq(shost->host_lock);
3762                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3763                         vport->port_state = LPFC_FDISC;
3764                 } else {
3765                         /*
3766                          * Otherwise, request the port to rediscover
3767                          * the entire FCF table for a fast recovery
3768                          * in case the current FCF is no longer
3769                          * valid, provided we are not already in the
3770                          * FCF failover process.
3771                          */
3772                         spin_lock_irq(&phba->hbalock);
3773                         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3774                                 spin_unlock_irq(&phba->hbalock);
3775                                 break;
3776                         }
3777                         /* Mark the fast failover process in progress */
3778                         phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3779                         spin_unlock_irq(&phba->hbalock);
3780                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3781                                         LOG_DISCOVERY,
3782                                         "2773 Start FCF failover per CVL, "
3783                                         "evt_tag:x%x\n", acqe_fip->event_tag);
3784                         rc = lpfc_sli4_redisc_fcf_table(phba);
3785                         if (rc) {
3786                                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3787                                                 LOG_DISCOVERY,
3788                                                 "2774 Issue FCF rediscover "
3789                                                 "mailbox command failed, fail "
3790                                                 "through to CVL event\n");
3791                                 spin_lock_irq(&phba->hbalock);
3792                                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3793                                 spin_unlock_irq(&phba->hbalock);
3794                                 /*
3795                                  * Last resort will be a retry on the
3796                                  * currently registered FCF entry.
3797                                  */
3798                                 lpfc_retry_pport_discovery(phba);
3799                         } else
3800                                 /*
3801                                  * Reset FCF roundrobin bmask for new
3802                                  * discovery.
3803                                  */
3804                                 lpfc_sli4_clear_fcf_rr_bmask(phba);
3805                 }
3806                 break;
3807         default:
3808                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3809                         "0288 Unknown FCoE event type 0x%x event tag "
3810                         "0x%x\n", event_type, acqe_fip->event_tag);
3811                 break;
3812         }
3813 }
3814
3815 /**
3816  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3817  * @phba: pointer to lpfc hba data structure.
3818  * @acqe_dcbx: pointer to the async DCBX completion queue entry.
3819  *
3820  * This routine is to handle the SLI4 asynchronous dcbx event.
3821  **/
3822 static void
3823 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3824                          struct lpfc_acqe_dcbx *acqe_dcbx)
3825 {
3826         phba->fc_eventTag = acqe_dcbx->event_tag;
3827         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3828                         "0290 The SLI4 DCBX asynchronous event is not "
3829                         "handled yet\n");
3830 }
3831
3832 /**
3833  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3834  * @phba: pointer to lpfc hba data structure.
3835  * @acqe_grp5: pointer to the async grp5 completion queue entry.
3836  *
3837  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3838  * is an asynchronous notification of a logical link speed change.  The port
3839  * reports the logical link speed in units of 10Mbps.
3840  **/
3841 static void
3842 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3843                          struct lpfc_acqe_grp5 *acqe_grp5)
3844 {
3845         uint16_t prev_ll_spd;
3846
3847         phba->fc_eventTag = acqe_grp5->event_tag;
3848         phba->fcoe_eventtag = acqe_grp5->event_tag;
3849         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3850         phba->sli4_hba.link_state.logical_speed =
3851                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3852         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3853                         "2789 GRP5 Async Event: Updating logical link speed "
3854                         "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3855                         (phba->sli4_hba.link_state.logical_speed * 10));
3856 }
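/*
 * Worked example (illustrative, not from the original source): because the
 * port reports logical link speed in units of 10 Mbps, a raw CQE value of
 * 100 decodes as 100 * 10 = 1000 Mbps, i.e. a 1 Gbps logical link, which is
 * what the "2789" message above prints.
 */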
3857
3858 /**
3859  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3860  * @phba: pointer to lpfc hba data structure.
3861  *
3862  * This routine is invoked by the worker thread to process all the pending
3863  * SLI4 asynchronous events.
3864  **/
3865 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3866 {
3867         struct lpfc_cq_event *cq_event;
3868
3869         /* First, declare the async event has been handled */
3870         spin_lock_irq(&phba->hbalock);
3871         phba->hba_flag &= ~ASYNC_EVENT;
3872         spin_unlock_irq(&phba->hbalock);
3873         /* Now, handle all the async events */
3874         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3875                 /* Get the first event from the head of the event queue */
3876                 spin_lock_irq(&phba->hbalock);
3877                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3878                                  cq_event, struct lpfc_cq_event, list);
3879                 spin_unlock_irq(&phba->hbalock);
3880                 /* Process the asynchronous event */
3881                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3882                 case LPFC_TRAILER_CODE_LINK:
3883                         lpfc_sli4_async_link_evt(phba,
3884                                                  &cq_event->cqe.acqe_link);
3885                         break;
3886                 case LPFC_TRAILER_CODE_FCOE:
3887                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3888                         break;
3889                 case LPFC_TRAILER_CODE_DCBX:
3890                         lpfc_sli4_async_dcbx_evt(phba,
3891                                                  &cq_event->cqe.acqe_dcbx);
3892                         break;
3893                 case LPFC_TRAILER_CODE_GRP5:
3894                         lpfc_sli4_async_grp5_evt(phba,
3895                                                  &cq_event->cqe.acqe_grp5);
3896                         break;
3897                 case LPFC_TRAILER_CODE_FC:
3898                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3899                         break;
3900                 case LPFC_TRAILER_CODE_SLI:
3901                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3902                         break;
3903                 default:
3904                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3905                                         "1804 Invalid asynchronous event code: "
3906                                         "x%x\n", bf_get(lpfc_trailer_code,
3907                                         &cq_event->cqe.mcqe_cmpl));
3908                         break;
3909                 }
3910                 /* Free the completion event processed to the free pool */
3911                 lpfc_sli4_cq_event_release(phba, cq_event);
3912         }
3913 }
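/*
 * For context, a minimal sketch of the producer side of this queue (the
 * slow-path completion handling lives in lpfc_sli.c; the exact code there
 * differs, but it pairs with the consumer loop above roughly like this):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&cq_event->list,
 *		      &phba->sli4_hba.sp_asynce_work_queue);
 *	phba->hba_flag |= ASYNC_EVENT;
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	lpfc_worker_wake_up(phba);	/- let the worker thread drain it
 */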
3914
3915 /**
3916  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3917  * @phba: pointer to lpfc hba data structure.
3918  *
3919  * This routine is invoked by the worker thread to process FCF table
3920  * rediscovery pending completion event.
3921  **/
3922 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3923 {
3924         int rc;
3925
3926         spin_lock_irq(&phba->hbalock);
3927         /* Clear FCF rediscovery timeout event */
3928         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3929         /* Clear driver fast failover FCF record flag */
3930         phba->fcf.failover_rec.flag = 0;
3931         /* Set state for FCF fast failover */
3932         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3933         spin_unlock_irq(&phba->hbalock);
3934
3935         /* Scan FCF table from the first entry to re-discover SAN */
3936         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3937                         "2777 Start post-quiescent FCF table scan\n");
3938         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3939         if (rc)
3940                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3941                                 "2747 Issue FCF scan read FCF mailbox "
3942                                 "command failed 0x%x\n", rc);
3943 }
3944
3945 /**
3946  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3947  * @phba: pointer to lpfc hba data structure.
3948  * @dev_grp: The HBA PCI-Device group number.
3949  *
3950  * This routine is invoked to set up the per HBA PCI-Device group function
3951  * API jump table entries.
3952  *
3953  * Return: 0 if success, otherwise -ENODEV
3954  **/
3955 int
3956 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3957 {
3958         int rc;
3959
3960         /* Set up lpfc PCI-device group */
3961         phba->pci_dev_grp = dev_grp;
3962
3963         /* The LPFC_PCI_DEV_OC uses SLI4 */
3964         if (dev_grp == LPFC_PCI_DEV_OC)
3965                 phba->sli_rev = LPFC_SLI_REV4;
3966
3967         /* Set up device INIT API function jump table */
3968         rc = lpfc_init_api_table_setup(phba, dev_grp);
3969         if (rc)
3970                 return -ENODEV;
3971         /* Set up SCSI API function jump table */
3972         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3973         if (rc)
3974                 return -ENODEV;
3975         /* Set up SLI API function jump table */
3976         rc = lpfc_sli_api_table_setup(phba, dev_grp);
3977         if (rc)
3978                 return -ENODEV;
3979         /* Set up MBOX API function jump table */
3980         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3981         if (rc)
3982                 return -ENODEV;
3983
3984         return 0;
3985 }
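/*
 * Usage sketch (illustrative; the error label is hypothetical): the PCI
 * probe paths pick the device group once, before any jump table is
 * consulted, e.g.:
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);  /- SLI4 parts
 *	if (rc)
 *		goto out_free_phba;
 */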
3986
3987 /**
3988  * lpfc_log_intr_mode - Log the active interrupt mode
3989  * @phba: pointer to lpfc hba data structure.
3990  * @intr_mode: active interrupt mode adopted.
3991  *
3992  * This routine is invoked to log the currently used active interrupt mode
3993  * to the device.
3994  **/
3995 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3996 {
3997         switch (intr_mode) {
3998         case 0:
3999                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4000                                 "0470 Enabled INTx interrupt mode.\n");
4001                 break;
4002         case 1:
4003                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4004                                 "0481 Enabled MSI interrupt mode.\n");
4005                 break;
4006         case 2:
4007                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4008                                 "0480 Enabled MSI-X interrupt mode.\n");
4009                 break;
4010         default:
4011                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4012                                 "0482 Illegal interrupt mode.\n");
4013                 break;
4014         }
4015         return;
4016 }
4017
4018 /**
4019  * lpfc_enable_pci_dev - Enable a generic PCI device.
4020  * @phba: pointer to lpfc hba data structure.
4021  *
4022  * This routine is invoked to enable the PCI device that is common to all
4023  * PCI devices.
4024  *
4025  * Return codes
4026  *      0 - successful
4027  *      other values - error
4028  **/
4029 static int
4030 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4031 {
4032         struct pci_dev *pdev;
4033         int bars = 0;
4034
4035         /* Obtain PCI device reference */
4036         if (!phba->pcidev)
4037                 goto out_error;
4038         else
4039                 pdev = phba->pcidev;
4040         /* Select PCI BARs */
4041         bars = pci_select_bars(pdev, IORESOURCE_MEM);
4042         /* Enable PCI device */
4043         if (pci_enable_device_mem(pdev))
4044                 goto out_error;
4045         /* Request PCI resource for the device */
4046         if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4047                 goto out_disable_device;
4048         /* Set up device as PCI master and save state for EEH */
4049         pci_set_master(pdev);
4050         pci_try_set_mwi(pdev);
4051         pci_save_state(pdev);
4052
4053         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4054         if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
4055                 pdev->needs_freset = 1;
4056
4057         return 0;
4058
4059 out_disable_device:
4060         pci_disable_device(pdev);
4061 out_error:
4062         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4063                         "1401 Failed to enable pci device, bars:x%x\n", bars);
4064         return -ENODEV;
4065 }
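/*
 * Pairing sketch (illustrative; labels are hypothetical): callers that
 * enable the device are expected to unwind with lpfc_disable_pci_dev()
 * on any later probe failure and on device removal:
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		goto out_free_phba;
 *	...
 *	lpfc_disable_pci_dev(phba);	/- error unwind / remove path
 */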
4066
4067 /**
4068  * lpfc_disable_pci_dev - Disable a generic PCI device.
4069  * @phba: pointer to lpfc hba data structure.
4070  *
4071  * This routine is invoked to disable the PCI device that is common to all
4072  * PCI devices.
4073  **/
4074 static void
4075 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4076 {
4077         struct pci_dev *pdev;
4078         int bars;
4079
4080         /* Obtain PCI device reference */
4081         if (!phba->pcidev)
4082                 return;
4083         else
4084                 pdev = phba->pcidev;
4085         /* Select PCI BARs */
4086         bars = pci_select_bars(pdev, IORESOURCE_MEM);
4087         /* Release PCI resource and disable PCI device */
4088         pci_release_selected_regions(pdev, bars);
4089         pci_disable_device(pdev);
4090         /* Null out PCI private reference to driver */
4091         pci_set_drvdata(pdev, NULL);
4092
4093         return;
4094 }
4095
4096 /**
4097  * lpfc_reset_hba - Reset a hba
4098  * @phba: pointer to lpfc hba data structure.
4099  *
4100  * This routine is invoked to reset a hba device. It brings the HBA
4101  * offline, performs a board restart, and then brings the board back
4102  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4103  * any outstanding mailbox commands.
4104  **/
4105 void
4106 lpfc_reset_hba(struct lpfc_hba *phba)
4107 {
4108         /* If resets are disabled then set error state and return. */
4109         if (!phba->cfg_enable_hba_reset) {
4110                 phba->link_state = LPFC_HBA_ERROR;
4111                 return;
4112         }
4113         lpfc_offline_prep(phba);
4114         lpfc_offline(phba);
4115         lpfc_sli_brdrestart(phba);
4116         lpfc_online(phba);
4117         lpfc_unblock_mgmt_io(phba);
4118 }
4119
4120 /**
4121  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4122  * @phba: pointer to lpfc hba data structure.
4123  *
4124  * This function reads the PCI SR-IOV extended capability of the physical
4125  * function to determine the total number of virtual functions the device
4126  * supports. A return value of 0 means the device does not have an SR-IOV
4127  * capability, so callers can treat missing SR-IOV support as "no virtual
4128  * functions available" rather than as an error.
4129  **/
4130 uint16_t
4131 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4132 {
4133         struct pci_dev *pdev = phba->pcidev;
4134         uint16_t nr_virtfn;
4135         int pos;
4136
4137         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4138         if (pos == 0)
4139                 return 0;
4140
4141         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4142         return nr_virtfn;
4143 }
4144
4145 /**
4146  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4147  * @phba: pointer to lpfc hba data structure.
4148  * @nr_vfn: number of virtual functions to be enabled.
4149  *
4150  * This function enables PCI SR-IOV virtual functions on a physical
4151  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4152  * enable that number of virtual functions on the physical function. As
4153  * not all devices support SR-IOV, a failure return from the
4154  * pci_enable_sriov() API call is not treated as an error for most devices.
4155  **/
4156 int
4157 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4158 {
4159         struct pci_dev *pdev = phba->pcidev;
4160         uint16_t max_nr_vfn;
4161         int rc;
4162
4163         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4164         if (nr_vfn > max_nr_vfn) {
4165                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4166                                 "3057 Requested vfs (%d) greater than "
4167                                 "supported vfs (%d)\n", nr_vfn, max_nr_vfn);
4168                 return -EINVAL;
4169         }
4170
4171         rc = pci_enable_sriov(pdev, nr_vfn);
4172         if (rc) {
4173                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4174                                 "2806 Failed to enable sriov on this device "
4175                                 "with vfn number nr_vf:%d, rc:%d\n",
4176                                 nr_vfn, rc);
4177         } else
4178                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4179                                 "2807 Successfully enabled sriov on this device "
4180                                 "with vfn number nr_vf:%d\n", nr_vfn);
4181         return rc;
4182 }
4183
4184 /**
4185  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4186  * @phba: pointer to lpfc hba data structure.
4187  *
4188  * This routine is invoked to set up the driver internal resources specific to
4189  * support the SLI-3 HBA device it is attached to.
4190  *
4191  * Return codes
4192  *      0 - successful
4193  *      other values - error
4194  **/
4195 static int
4196 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4197 {
4198         struct lpfc_sli *psli;
4199         int rc;
4200
4201         /*
4202          * Initialize timers used by driver
4203          */
4204
4205         /* Heartbeat timer */
4206         init_timer(&phba->hb_tmofunc);
4207         phba->hb_tmofunc.function = lpfc_hb_timeout;
4208         phba->hb_tmofunc.data = (unsigned long)phba;
4209
4210         psli = &phba->sli;
4211         /* MBOX heartbeat timer */
4212         init_timer(&psli->mbox_tmo);
4213         psli->mbox_tmo.function = lpfc_mbox_timeout;
4214         psli->mbox_tmo.data = (unsigned long) phba;
4215         /* FCP polling mode timer */
4216         init_timer(&phba->fcp_poll_timer);
4217         phba->fcp_poll_timer.function = lpfc_poll_timeout;
4218         phba->fcp_poll_timer.data = (unsigned long) phba;
4219         /* Fabric block timer */
4220         init_timer(&phba->fabric_block_timer);
4221         phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4222         phba->fabric_block_timer.data = (unsigned long) phba;
4223         /* EA polling mode timer */
4224         init_timer(&phba->eratt_poll);
4225         phba->eratt_poll.function = lpfc_poll_eratt;
4226         phba->eratt_poll.data = (unsigned long) phba;
4227
4228         /* Host attention work mask setup */
4229         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4230         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4231
4232         /* Get all the module params for configuring this host */
4233         lpfc_get_cfgparam(phba);
4234         if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4235                 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4236                 /* check for menlo minimum sg count */
4237                 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4238                         phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4239         }
4240
4241         /*
4242          * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4243          * used to create the sg_dma_buf_pool must be dynamically calculated.
4244          * 2 segments are added since the IOCB needs a command and response bde.
4245          */
4246         phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4247                 sizeof(struct fcp_rsp) +
4248                         ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
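        /*
         * Worked example (struct sizes assumed for illustration only):
         * with a 32-byte fcp_cmnd, a 96-byte fcp_rsp and 12-byte
         * ulp_bde64 entries, cfg_sg_seg_cnt = 64 gives
         * 32 + 96 + (64 + 2) * 12 = 920 bytes per DMA buffer.
         */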
4249
4250         if (phba->cfg_enable_bg) {
4251                 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4252                 phba->cfg_sg_dma_buf_size +=
4253                         phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4254         }
4255
4256         /* Also reinitialize the host templates with new values. */
4257         lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4258         lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4259
4260         phba->max_vpi = LPFC_MAX_VPI;
4261         /* This will be set to correct value after config_port mbox */
4262         phba->max_vports = 0;
4263
4264         /*
4265          * Initialize the SLI Layer to run with lpfc HBAs.
4266          */
4267         lpfc_sli_setup(phba);
4268         lpfc_sli_queue_setup(phba);
4269
4270         /* Allocate device driver memory */
4271         if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4272                 return -ENOMEM;
4273
4274         /*
4275          * Enable sr-iov virtual functions if supported and configured
4276          * through the module parameter.
4277          */
4278         if (phba->cfg_sriov_nr_virtfn > 0) {
4279                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4280                                                  phba->cfg_sriov_nr_virtfn);
4281                 if (rc) {
4282                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4283                                         "2808 Requested number of SR-IOV "
4284                                         "virtual functions (%d) is not "
4285                                         "supported\n",
4286                                         phba->cfg_sriov_nr_virtfn);
4287                         phba->cfg_sriov_nr_virtfn = 0;
4288                 }
4289         }
4290
4291         return 0;
4292 }
4293
4294 /**
4295  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4296  * @phba: pointer to lpfc hba data structure.
4297  *
4298  * This routine is invoked to unset the driver internal resources set up
4299  * specific for supporting the SLI-3 HBA device it is attached to.
4300  **/
4301 static void
4302 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4303 {
4304         /* Free device driver memory allocated */
4305         lpfc_mem_free_all(phba);
4306
4307         return;
4308 }
4309
4310 /**
4311  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4312  * @phba: pointer to lpfc hba data structure.
4313  *
4314  * This routine is invoked to set up the driver internal resources specific to
4315  * support the SLI-4 HBA device it is attached to.
4316  *
4317  * Return codes
4318  *      0 - successful
4319  *      other values - error
4320  **/
4321 static int
4322 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4323 {
4324         struct lpfc_sli *psli;
4325         LPFC_MBOXQ_t *mboxq;
4326         int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4327         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4328         struct lpfc_mqe *mqe;
4329         int longs, sli_family;
4330
4331         /* Before proceed, wait for POST done and device ready */
4332         rc = lpfc_sli4_post_status_check(phba);
4333         if (rc)
4334                 return -ENODEV;
4335
4336         /*
4337          * Initialize timers used by driver
4338          */
4339
4340         /* Heartbeat timer */
4341         init_timer(&phba->hb_tmofunc);
4342         phba->hb_tmofunc.function = lpfc_hb_timeout;
4343         phba->hb_tmofunc.data = (unsigned long)phba;
4344         init_timer(&phba->rrq_tmr);
4345         phba->rrq_tmr.function = lpfc_rrq_timeout;
4346         phba->rrq_tmr.data = (unsigned long)phba;
4347
4348         psli = &phba->sli;
4349         /* MBOX heartbeat timer */
4350         init_timer(&psli->mbox_tmo);
4351         psli->mbox_tmo.function = lpfc_mbox_timeout;
4352         psli->mbox_tmo.data = (unsigned long) phba;
4353         /* Fabric block timer */
4354         init_timer(&phba->fabric_block_timer);
4355         phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4356         phba->fabric_block_timer.data = (unsigned long) phba;
4357         /* EA polling mode timer */
4358         init_timer(&phba->eratt_poll);
4359         phba->eratt_poll.function = lpfc_poll_eratt;
4360         phba->eratt_poll.data = (unsigned long) phba;
4361         /* FCF rediscover timer */
4362         init_timer(&phba->fcf.redisc_wait);
4363         phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4364         phba->fcf.redisc_wait.data = (unsigned long)phba;
4365
4366         /*
4367          * Control structure for handling external multi-buffer mailbox
4368          * command pass-through.
4369          */
4370         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4371                 sizeof(struct lpfc_mbox_ext_buf_ctx));
4372         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4373
4374         /*
4375          * We need to do a READ_CONFIG mailbox command here before
4376          * calling lpfc_get_cfgparam. For VFs this will report the
4377          * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4378          * All of the resources allocated for this Port are tied to
4379          * these values.
4380          */
4381         /* Get all the module params for configuring this host */
4382         lpfc_get_cfgparam(phba);
4383         phba->max_vpi = LPFC_MAX_VPI;
4384         /* This will be set to correct value after the read_config mbox */
4385         phba->max_vports = 0;
4386
4387         /* Program the default value of vlan_id and fc_map */
4388         phba->valid_vlan = 0;
4389         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4390         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4391         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4392
4393         /*
4394          * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4395          * used to create the sg_dma_buf_pool must be dynamically calculated.
4396          * 2 segments are added since the IOCB needs a command and response bde.
4397          * To ensure that the scsi sgl does not cross a 4k page boundary, only
4398          * sgl sizes that are a power of 2 are used.
4399          */
4400         buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4401                     ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4402
4403         sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4404         max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4405         switch (sli_family) {
4406         case LPFC_SLI_INTF_FAMILY_BE2:
4407         case LPFC_SLI_INTF_FAMILY_BE3:
4408                 /* There is a single hint for BE - 2 pages per BPL. */
4409                 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4410                     LPFC_SLI_INTF_SLI_HINT1_1)
4411                         max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4412                 break;
4413         case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4414         case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4415         default:
4416                 break;
4417         }
4418         for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4419              dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4420              dma_buf_size = dma_buf_size << 1)
4421                 ;
4422         if (dma_buf_size == max_buf_size)
4423                 phba->cfg_sg_seg_cnt = (dma_buf_size -
4424                         sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4425                         (2 * sizeof(struct sli4_sge))) /
4426                                 sizeof(struct sli4_sge);
4427         phba->cfg_sg_dma_buf_size = dma_buf_size;
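        /*
         * Worked example (assumes LPFC_SLI4_MIN_BUF_SIZE is 1024): for a
         * computed buf_size of 2200 the loop above doubles 1024 -> 2048
         * -> 4096 and stops at the first power of two >= buf_size, so
         * the pool buffer becomes 4096 bytes and a scsi sgl never
         * straddles a 4k page boundary.
         */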
4428
4429         /* Initialize buffer queue management fields */
4430         hbq_count = lpfc_sli_hbq_count();
4431         for (i = 0; i < hbq_count; ++i)
4432                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4433         INIT_LIST_HEAD(&phba->rb_pend_list);
4434         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4435         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4436
4437         /*
4438          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4439          */
4440         /* Initialize the Abort scsi buffer list used by driver */
4441         spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4442         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4443         /* This abort list used by worker thread */
4444         spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4445
4446         /*
4447          * Initialize driver internal slow-path work queues
4448          */
4449
4450         /* Driver internal slow-path CQ Event pool */
4451         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4452         /* Response IOCB work queue list */
4453         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4454         /* Asynchronous event CQ Event work queue list */
4455         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4456         /* Fast-path XRI aborted CQ Event work queue list */
4457         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4458         /* Slow-path XRI aborted CQ Event work queue list */
4459         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4460         /* Receive queue CQ Event work queue list */
4461         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4462
4463         /* Initialize extent block lists. */
4464         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4465         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4466         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4467         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4468
4469         /* Initialize the driver internal SLI layer lists. */
4470         lpfc_sli_setup(phba);
4471         lpfc_sli_queue_setup(phba);
4472
4473         /* Allocate device driver memory */
4474         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4475         if (rc)
4476                 return -ENOMEM;
4477
4478         /* IF Type 2 ports get initialized now. */
4479         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4480             LPFC_SLI_INTF_IF_TYPE_2) {
4481                 rc = lpfc_pci_function_reset(phba);
4482                 if (unlikely(rc))
4483                         return -ENODEV;
4484         }
4485
4486         /* Create the bootstrap mailbox command */
4487         rc = lpfc_create_bootstrap_mbox(phba);
4488         if (unlikely(rc))
4489                 goto out_free_mem;
4490
4491         /* Set up the host's endian order with the device. */
4492         rc = lpfc_setup_endian_order(phba);
4493         if (unlikely(rc))
4494                 goto out_free_bsmbx;
4495
4496         /* Set up the hba's configuration parameters. */
4497         rc = lpfc_sli4_read_config(phba);
4498         if (unlikely(rc))
4499                 goto out_free_bsmbx;
4500
4501         /* IF Type 0 ports get initialized now. */
4502         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4503             LPFC_SLI_INTF_IF_TYPE_0) {
4504                 rc = lpfc_pci_function_reset(phba);
4505                 if (unlikely(rc))
4506                         goto out_free_bsmbx;
4507         }
4508
4509         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4510                                                        GFP_KERNEL);
4511         if (!mboxq) {
4512                 rc = -ENOMEM;
4513                 goto out_free_bsmbx;
4514         }
4515
4516         /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4517         lpfc_supported_pages(mboxq);
4518         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4519         if (!rc) {
4520                 mqe = &mboxq->u.mqe;
4521                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4522                        LPFC_MAX_SUPPORTED_PAGES);
4523                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4524                         switch (pn_page[i]) {
4525                         case LPFC_SLI4_PARAMETERS:
4526                                 phba->sli4_hba.pc_sli4_params.supported = 1;
4527                                 break;
4528                         default:
4529                                 break;
4530                         }
4531                 }
4532                 /* Read the port's SLI4 Parameters capabilities if supported. */
4533                 if (phba->sli4_hba.pc_sli4_params.supported)
4534                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
4535                 if (rc) {
4536                         mempool_free(mboxq, phba->mbox_mem_pool);
4537                         rc = -EIO;
4538                         goto out_free_bsmbx;
4539                 }
4540         }
4541         /*
4542          * Get sli4 parameters that override parameters from Port capabilities.
4543          * If this call fails, it isn't critical unless the SLI4 parameters come
4544          * back in conflict.
4545          */
4546         rc = lpfc_get_sli4_parameters(phba, mboxq);
4547         if (rc) {
4548                 if (phba->sli4_hba.extents_in_use &&
4549                     phba->sli4_hba.rpi_hdrs_in_use) {
4550                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4551                                 "2999 Unsupported SLI4 Parameters "
4552                                 "Extents and RPI headers enabled.\n");
                        mempool_free(mboxq, phba->mbox_mem_pool);
4553                        goto out_free_bsmbx;
4554                 }
4555         }
4556         mempool_free(mboxq, phba->mbox_mem_pool);
4557         /* Verify all the SLI4 queues */
4558         rc = lpfc_sli4_queue_verify(phba);
4559         if (rc)
4560                 goto out_free_bsmbx;
4561
4562         /* Create driver internal CQE event pool */
4563         rc = lpfc_sli4_cq_event_pool_create(phba);
4564         if (rc)
4565                 goto out_free_bsmbx;
4566
4567         /* Initialize and populate the sgl list per host */
4568         rc = lpfc_init_sgl_list(phba);
4569         if (rc) {
4570                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4571                                 "1400 Failed to initialize sgl list.\n");
4572                 goto out_destroy_cq_event_pool;
4573         }
4574         rc = lpfc_init_active_sgl_array(phba);
4575         if (rc) {
4576                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4577                                 "1430 Failed to initialize active sgl array.\n");
4578                 goto out_free_sgl_list;
4579         }
4580         rc = lpfc_sli4_init_rpi_hdrs(phba);
4581         if (rc) {
4582                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4583                                 "1432 Failed to initialize rpi headers.\n");
4584                 goto out_free_active_sgl;
4585         }
4586
4587         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4588         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4589         phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4590                                          GFP_KERNEL);
4591         if (!phba->fcf.fcf_rr_bmask) {
4592                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4593                                 "2759 Failed allocate memory for FCF round "
4594                                 "robin failover bmask\n");
4595                 rc = -ENOMEM;
4596                 goto out_remove_rpi_hdrs;
4597         }
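        /*
         * Sizing example (illustrative): with an FCF table index max of
         * 32 and 64-bit longs, longs = (32 + 63) / 64 = 1, so a single
         * unsigned long backs the whole roundrobin bitmask.
         */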
4598
4599         /*
4600          * The cfg_fcp_eq_count can be zero whenever there is exactly one
4601          * interrupt vector.  This is not an error
4602          * interrupt vector.  This is not an error.
4603         if (phba->cfg_fcp_eq_count) {
4604                 phba->sli4_hba.fcp_eq_hdl =
4605                                 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4606                                     phba->cfg_fcp_eq_count), GFP_KERNEL);
4607                 if (!phba->sli4_hba.fcp_eq_hdl) {
4608                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4609                                         "2572 Failed allocate memory for "
4610                                         "fast-path per-EQ handle array\n");
4611                         rc = -ENOMEM;
4612                         goto out_free_fcf_rr_bmask;
4613                 }
4614         }
4615
4616         phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4617                                       phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4618         if (!phba->sli4_hba.msix_entries) {
4619                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4620                                 "2573 Failed allocate memory for msi-x "
4621                                 "interrupt vector entries\n");
4622                 rc = -ENOMEM;
4623                 goto out_free_fcp_eq_hdl;
4624         }
4625
4626         /*
4627          * Enable sr-iov virtual functions if supported and configured
4628          * through the module parameter.
4629          */
4630         if (phba->cfg_sriov_nr_virtfn > 0) {
4631                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4632                                                  phba->cfg_sriov_nr_virtfn);
4633                 if (rc) {
4634                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4635                                         "3020 Requested number of SR-IOV "
4636                                         "virtual functions (%d) is not "
4637                                         "supported\n",
4638                                         phba->cfg_sriov_nr_virtfn);
4639                         phba->cfg_sriov_nr_virtfn = 0;
4640                 }
4641         }
4642
4643         return 0;
4644
4645 out_free_fcp_eq_hdl:
4646         kfree(phba->sli4_hba.fcp_eq_hdl);
4647 out_free_fcf_rr_bmask:
4648         kfree(phba->fcf.fcf_rr_bmask);
4649 out_remove_rpi_hdrs:
4650         lpfc_sli4_remove_rpi_hdrs(phba);
4651 out_free_active_sgl:
4652         lpfc_free_active_sgl(phba);
4653 out_free_sgl_list:
4654         lpfc_free_sgl_list(phba);
4655 out_destroy_cq_event_pool:
4656         lpfc_sli4_cq_event_pool_destroy(phba);
4657 out_free_bsmbx:
4658         lpfc_destroy_bootstrap_mbox(phba);
4659 out_free_mem:
4660         lpfc_mem_free(phba);
4661         return rc;
4662 }
4663
4664 /**
4665  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4666  * @phba: pointer to lpfc hba data structure.
4667  *
4668  * This routine is invoked to unset the driver internal resources set up
4669  * specific for supporting the SLI-4 HBA device it is attached to.
4670  **/
4671 static void
4672 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4673 {
4674         struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4675
4676         /* Free memory allocated for msi-x interrupt vector entries */
4677         kfree(phba->sli4_hba.msix_entries);
4678
4679         /* Free memory allocated for fast-path work queue handles */
4680         kfree(phba->sli4_hba.fcp_eq_hdl);
4681
4682         /* Free the allocated rpi headers. */
4683         lpfc_sli4_remove_rpi_hdrs(phba);
4684         lpfc_sli4_remove_rpis(phba);
4685
4686         /* Free eligible FCF index bmask */
4687         kfree(phba->fcf.fcf_rr_bmask);
4688
4689         /* Free the ELS sgl list */
4690         lpfc_free_active_sgl(phba);
4691         lpfc_free_sgl_list(phba);
4692
4693         /* Free the SCSI sgl management array */
4694         kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4695
4696         /* Free the completion queue EQ event pool */
4697         lpfc_sli4_cq_event_release_all(phba);
4698         lpfc_sli4_cq_event_pool_destroy(phba);
4699
4700         /* Release resource identifiers. */
4701         lpfc_sli4_dealloc_resource_identifiers(phba);
4702
4703         /* Free the bsmbx region. */
4704         lpfc_destroy_bootstrap_mbox(phba);
4705
4706         /* Free the SLI Layer memory with SLI4 HBAs */
4707         lpfc_mem_free_all(phba);
4708
4709         /* Free the current connect table */
4710         list_for_each_entry_safe(conn_entry, next_conn_entry,
4711                 &phba->fcf_conn_rec_list, list) {
4712                 list_del_init(&conn_entry->list);
4713                 kfree(conn_entry);
4714         }
4715
4716         return;
4717 }
4718
4719 /**
4720  * lpfc_init_api_table_setup - Set up init api function jump table
4721  * @phba: The hba struct for which this call is being executed.
4722  * @dev_grp: The HBA PCI-Device group number.
4723  *
4724  * This routine sets up the device INIT interface API function jump table
4725  * in @phba struct.
4726  *
4727  * Returns: 0 - success, -ENODEV - failure.
4728  **/
4729 int
4730 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4731 {
4732         phba->lpfc_hba_init_link = lpfc_hba_init_link;
4733         phba->lpfc_hba_down_link = lpfc_hba_down_link;
4734         phba->lpfc_selective_reset = lpfc_selective_reset;
4735         switch (dev_grp) {
4736         case LPFC_PCI_DEV_LP:
4737                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4738                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4739                 phba->lpfc_stop_port = lpfc_stop_port_s3;
4740                 break;
4741         case LPFC_PCI_DEV_OC:
4742                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4743                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4744                 phba->lpfc_stop_port = lpfc_stop_port_s4;
4745                 break;
4746         default:
4747                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4748                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
4749                                 dev_grp);
4750                 return -ENODEV;
4752         }
4753         return 0;
4754 }
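/*
 * Dispatch sketch (illustrative): once the table is populated, common
 * code calls through the per-revision pointers without knowing which
 * SLI revision is underneath:
 *
 *	phba->lpfc_stop_port(phba);	/- lpfc_stop_port_s3 or _s4
 *	phba->lpfc_hba_down_post(phba);	/- _s3 or _s4 variant
 */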
4755
4756 /**
4757  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4758  * @phba: pointer to lpfc hba data structure.
4759  *
4760  * This routine is invoked to set up the driver internal resources before the
4761  * device specific resource setup to support the HBA device it is attached to.
4762  *
4763  * Return codes
4764  *      0 - successful
4765  *      other values - error
4766  **/
4767 static int
4768 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4769 {
4770         /*
4771          * Driver resources common to all SLI revisions
4772          */
4773         atomic_set(&phba->fast_event_count, 0);
4774         spin_lock_init(&phba->hbalock);
4775
4776         /* Initialize ndlp management spinlock */
4777         spin_lock_init(&phba->ndlp_lock);
4778
4779         INIT_LIST_HEAD(&phba->port_list);
4780         INIT_LIST_HEAD(&phba->work_list);
4781         init_waitqueue_head(&phba->wait_4_mlo_m_q);
4782
4783         /* Initialize the wait queue head for the kernel thread */
4784         init_waitqueue_head(&phba->work_waitq);
4785
4786         /* Initialize the scsi buffer list used by driver for scsi IO */
4787         spin_lock_init(&phba->scsi_buf_list_lock);
4788         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4789
4790         /* Initialize the fabric iocb list */
4791         INIT_LIST_HEAD(&phba->fabric_iocb_list);
4792
4793         /* Initialize list to save ELS buffers */
4794         INIT_LIST_HEAD(&phba->elsbuf);
4795
4796         /* Initialize FCF connection rec list */
4797         INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4798
4799         return 0;
4800 }
4801
4802 /**
4803  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4804  * @phba: pointer to lpfc hba data structure.
4805  *
4806  * This routine is invoked to set up the driver internal resources after the
4807  * device specific resource setup to support the HBA device it attached to.
4808  *
4809  * Return codes
4810  *      0 - successful
4811  *      other values - error
4812  **/
4813 static int
4814 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4815 {
4816         int error;
4817
4818         /* Startup the kernel thread for this host adapter. */
4819         phba->worker_thread = kthread_run(lpfc_do_work, phba,
4820                                           "lpfc_worker_%d", phba->brd_no);
4821         if (IS_ERR(phba->worker_thread)) {
4822                 error = PTR_ERR(phba->worker_thread);
4823                 return error;
4824         }
4825
4826         return 0;
4827 }
4828
4829 /**
4830  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4831  * @phba: pointer to lpfc hba data structure.
4832  *
4833  * This routine is invoked to unset the driver internal resources set up after
4834  * the device specific resource setup for supporting the HBA device it
4835  * is attached to.
4836  **/
4837 static void
4838 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4839 {
4840         /* Stop kernel worker thread */
4841         kthread_stop(phba->worker_thread);
4842 }
4843
4844 /**
4845  * lpfc_free_iocb_list - Free iocb list.
4846  * @phba: pointer to lpfc hba data structure.
4847  *
4848  * This routine is invoked to free the driver's IOCB list and memory.
4849  **/
4850 static void
4851 lpfc_free_iocb_list(struct lpfc_hba *phba)
4852 {
4853         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4854
4855         spin_lock_irq(&phba->hbalock);
4856         list_for_each_entry_safe(iocbq_entry, iocbq_next,
4857                                  &phba->lpfc_iocb_list, list) {
4858                 list_del(&iocbq_entry->list);
4859                 kfree(iocbq_entry);
4860                 phba->total_iocbq_bufs--;
4861         }
4862         spin_unlock_irq(&phba->hbalock);
4863
4864         return;
4865 }
4866
4867 /**
4868  * lpfc_init_iocb_list - Allocate and initialize iocb list.
4869  * @phba: pointer to lpfc hba data structure.
4870  *
4871  * This routine is invoked to allocate and initialize the driver's IOCB
4872  * list and set up the IOCB tag array accordingly.
4873  *
4874  * Return codes
4875  *      0 - successful
4876  *      other values - error
4877  **/
4878 static int
4879 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4880 {
4881         struct lpfc_iocbq *iocbq_entry = NULL;
4882         uint16_t iotag;
4883         int i;
4884
4885         /* Initialize and populate the iocb list per host.  */
4886         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4887         for (i = 0; i < iocb_count; i++) {
4888                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4889                 if (iocbq_entry == NULL) {
4890                         printk(KERN_ERR "%s: only allocated %d iocbs of "
4891                                 "expected %d count. Unloading driver.\n",
4892                                 __func__, i, iocb_count);
4893                         goto out_free_iocbq;
4894                 }
4895
4896                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4897                 if (iotag == 0) {
4898                         kfree(iocbq_entry);
4899                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
4900                                 "Unloading driver.\n", __func__);
4901                         goto out_free_iocbq;
4902                 }
4903                 iocbq_entry->sli4_lxritag = NO_XRI;
4904                 iocbq_entry->sli4_xritag = NO_XRI;
4905
4906                 spin_lock_irq(&phba->hbalock);
4907                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4908                 phba->total_iocbq_bufs++;
4909                 spin_unlock_irq(&phba->hbalock);
4910         }
4911
4912         return 0;
4913
4914 out_free_iocbq:
4915         lpfc_free_iocb_list(phba);
4916
4917         return -ENOMEM;
4918 }
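/*
 * Lookup sketch (illustrative): lpfc_sli_next_iotag() also records each
 * iocbq in the SLI layer's tag-indexed lookup array, so completion
 * handling can translate a tag back to its request in O(1):
 *
 *	iocbq = phba->sli.iocbq_lookup[iotag];	/- assumes a valid iotag
 */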
4919
4920 /**
4921  * lpfc_free_sgl_list - Free sgl list.
4922  * @phba: pointer to lpfc hba data structure.
4923  *
4924  * This routine is invoked to free the driver's sgl list and memory.
4925  **/
4926 static void
4927 lpfc_free_sgl_list(struct lpfc_hba *phba)
4928 {
4929         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4930         LIST_HEAD(sglq_list);
4931
4932         spin_lock_irq(&phba->hbalock);
4933         list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4934         spin_unlock_irq(&phba->hbalock);
4935
4936         list_for_each_entry_safe(sglq_entry, sglq_next,
4937                                  &sglq_list, list) {
4938                 list_del(&sglq_entry->list);
4939                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4940                 kfree(sglq_entry);
4941                 phba->sli4_hba.total_sglq_bufs--;
4942         }
4943         kfree(phba->sli4_hba.lpfc_els_sgl_array);
4944 }
4945
4946 /**
4947  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4948  * @phba: pointer to lpfc hba data structure.
4949  *
4950  * This routine is invoked to allocate the driver's active sgl memory.
4951  * This array will hold the sglq_entry's for active IOs.
4952  **/
4953 static int
4954 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4955 {
4956         int size;
4957         size = sizeof(struct lpfc_sglq *);
4958         size *= phba->sli4_hba.max_cfg_param.max_xri;
4959
4960         phba->sli4_hba.lpfc_sglq_active_list =
4961                 kzalloc(size, GFP_KERNEL);
4962         if (!phba->sli4_hba.lpfc_sglq_active_list)
4963                 return -ENOMEM;
4964         return 0;
4965 }
4966
4967 /**
4968  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4969  * @phba: pointer to lpfc hba data structure.
4970  *
4971  * This routine is invoked to walk through the array of active sglq entries
4972  * and free all of the resources.
4973  * This is just a place holder for now.
4974  **/
4975 static void
4976 lpfc_free_active_sgl(struct lpfc_hba *phba)
4977 {
4978         kfree(phba->sli4_hba.lpfc_sglq_active_list);
4979 }
4980
4981 /**
4982  * lpfc_init_sgl_list - Allocate and initialize sgl list.
4983  * @phba: pointer to lpfc hba data structure.
4984  *
4985  * This routine is invoked to allocate and initialize the driver's sgl
4986  * list and set up the sgl xritag tag array accordingly.
4987  *
4988  * Return codes
4989  *      0 - successful
4990  *      other values - error
4991  **/
4992 static int
4993 lpfc_init_sgl_list(struct lpfc_hba *phba)
4994 {
4995         struct lpfc_sglq *sglq_entry = NULL;
4996         int i;
4997         int els_xri_cnt;
4998
4999         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5000         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5001                                 "2400 ELS XRI count %d.\n",
5002                                 els_xri_cnt);
5003         /* Initialize and populate the sglq list per host/VF. */
5004         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5005         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5006
5007         /* Sanity check on XRI management */
5008         if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
5009                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5010                                 "2562 No room left for SCSI XRI allocation: "
5011                                 "max_xri=%d, els_xri=%d\n",
5012                                 phba->sli4_hba.max_cfg_param.max_xri,
5013                                 els_xri_cnt);
5014                 return -ENOMEM;
5015         }
5016
5017         /* Allocate memory for the ELS XRI management array */
5018         phba->sli4_hba.lpfc_els_sgl_array =
5019                         kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5020                         GFP_KERNEL);
5021
5022         if (!phba->sli4_hba.lpfc_els_sgl_array) {
5023                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5024                                 "2401 Failed to allocate memory for ELS "
5025                                 "XRI management array of size %d.\n",
5026                                 els_xri_cnt);
5027                 return -ENOMEM;
5028         }
5029
5030         /* Keep the SCSI XRI into the XRI management array */
5031         phba->sli4_hba.scsi_xri_max =
5032                         phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5033         phba->sli4_hba.scsi_xri_cnt = 0;
5034         phba->sli4_hba.lpfc_scsi_psb_array =
5035                         kzalloc((sizeof(struct lpfc_scsi_buf *) *
5036                         phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5037
5038         if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5039                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5040                                 "2563 Failed to allocate memory for SCSI "
5041                                 "XRI management array of size %d.\n",
5042                                 phba->sli4_hba.scsi_xri_max);
5043                 kfree(phba->sli4_hba.lpfc_els_sgl_array);
5044                 return -ENOMEM;
5045         }
5046
5047         for (i = 0; i < els_xri_cnt; i++) {
5048                 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5049                 if (sglq_entry == NULL) {
5050                         printk(KERN_ERR "%s: only allocated %d sgls of "
5051                                 "expected %d count. Unloading driver.\n",
5052                                 __func__, i, els_xri_cnt);
5053                         goto out_free_mem;
5054                 }
5055
5056                 sglq_entry->buff_type = GEN_BUFF_TYPE;
5057                 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5058                 if (sglq_entry->virt == NULL) {
5059                         kfree(sglq_entry);
5060                         printk(KERN_ERR "%s: failed to allocate mbuf. "
5061                                 "Unloading driver.\n", __func__);
5062                         goto out_free_mem;
5063                 }
5064                 sglq_entry->sgl = sglq_entry->virt;
5065                 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5066
5067                 /* The list order is used by later block SGL registration */
5068                 spin_lock_irq(&phba->hbalock);
5069                 sglq_entry->state = SGL_FREED;
5070                 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5071                 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5072                 phba->sli4_hba.total_sglq_bufs++;
5073                 spin_unlock_irq(&phba->hbalock);
5074         }
5075         return 0;
5076
5077 out_free_mem:
5078         kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5079         lpfc_free_sgl_list(phba);
5080         return -ENOMEM;
5081 }
5082
5083 /**
5084  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5085  * @phba: pointer to lpfc hba data structure.
5086  *
5087  * This routine is invoked to post rpi header templates to the
5088  * port for those SLI4 ports that do not support extents.  This routine
5089  * posts a PAGE_SIZE memory region to the port to hold up to
5090  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
5091  * and should be called only when interrupts are disabled.
5092  *
5093  * Return codes
5094  *      0 - successful
5095  *      -ERROR - otherwise.
5096  **/
5097 int
5098 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5099 {
5100         int rc = 0;
5101         struct lpfc_rpi_hdr *rpi_hdr;
5102
5103         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5104         if (!phba->sli4_hba.rpi_hdrs_in_use)
5105                 return rc;
5106         if (phba->sli4_hba.extents_in_use)
5107                 return -EIO;
5108
5109         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5110         if (!rpi_hdr) {
5111                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5112                                 "0391 Error during rpi post operation\n");
5113                 lpfc_sli4_remove_rpis(phba);
5114                 rc = -ENODEV;
5115         }
5116
5117         return rc;
5118 }
5119
5120 /**
5121  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5122  * @phba: pointer to lpfc hba data structure.
5123  *
5124  * This routine is invoked to allocate a single 4KB memory region to
5125  * support rpis and stores them in the phba.  This single region
5126  * provides support for up to 64 rpis.  The region is used globally
5127  * by the device.
5128  *
5129  * Returns:
5130  *   A valid rpi hdr on success.
5131  *   A NULL pointer on any failure.
5132  **/
5133 struct lpfc_rpi_hdr *
5134 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5135 {
5136         uint16_t rpi_limit, curr_rpi_range;
5137         struct lpfc_dmabuf *dmabuf;
5138         struct lpfc_rpi_hdr *rpi_hdr;
5139         uint32_t rpi_count;
5140
5141         /*
5142          * If the SLI4 port supports extents, posting the rpi header isn't
5143          * required.  Set the expected maximum count and let the actual value
5144          * get set when extents are fully allocated.
5145          */
5146         if (!phba->sli4_hba.rpi_hdrs_in_use)
5147                 return NULL;
5148         if (phba->sli4_hba.extents_in_use)
5149                 return NULL;
5150
5151         /* The limit on the logical index is just the max_rpi count. */
5152         rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5153                     phba->sli4_hba.max_cfg_param.max_rpi - 1;
5154
5155         spin_lock_irq(&phba->hbalock);
5156         /*
5157          * Establish the starting RPI in this header block.  The starting
5158          * rpi is normalized to a zero base because the physical rpi is
5159          * port based.
5160          */
5161         curr_rpi_range = phba->sli4_hba.next_rpi -
5162                 phba->sli4_hba.max_cfg_param.rpi_base;
5163         spin_unlock_irq(&phba->hbalock);
5164
5165         /*
5166          * The port has a limited number of rpis. The increment here
5167          * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5168          * and to allow the full max_rpi range per port.
5169          */
5170         if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5171                 rpi_count = rpi_limit - curr_rpi_range;
5172         else
5173                 rpi_count = LPFC_RPI_HDR_COUNT;
5174
5175         if (!rpi_count)
5176                 return NULL;
5177         /*
5178          * First allocate the protocol header region for the port.  The
5179          * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5180          */
5181         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5182         if (!dmabuf)
5183                 return NULL;
5184
5185         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5186                                           LPFC_HDR_TEMPLATE_SIZE,
5187                                           &dmabuf->phys,
5188                                           GFP_KERNEL);
5189         if (!dmabuf->virt) {
5190                 rpi_hdr = NULL;
5191                 goto err_free_dmabuf;
5192         }
5193
5194         memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5195         if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5196                 rpi_hdr = NULL;
5197                 goto err_free_coherent;
5198         }
5199
5200         /* Save the rpi header data for cleanup later. */
5201         rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5202         if (!rpi_hdr)
5203                 goto err_free_coherent;
5204
5205         rpi_hdr->dmabuf = dmabuf;
5206         rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5207         rpi_hdr->page_count = 1;
5208         spin_lock_irq(&phba->hbalock);
5209
5210         /* The rpi_hdr stores the logical index only. */
5211         rpi_hdr->start_rpi = curr_rpi_range;
5212         list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5213
5214         /*
5215          * The next_rpi stores the next logical modulo-64 rpi value used
5216          * to post physical rpis in subsequent rpi postings.
5217          */
5218         phba->sli4_hba.next_rpi += rpi_count;
5219         spin_unlock_irq(&phba->hbalock);
5220         return rpi_hdr;
5221
5222  err_free_coherent:
5223         dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5224                           dmabuf->virt, dmabuf->phys);
5225  err_free_dmabuf:
5226         kfree(dmabuf);
5227         return NULL;
5228 }
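/*
 * Capacity check (illustrative): LPFC_HDR_TEMPLATE_SIZE is one 4KB page
 * and each rpi context header occupies 64 bytes, so a single posting
 * covers 4096 / 64 = 64 rpis, which is where LPFC_RPI_HDR_COUNT comes
 * from.
 */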
5229
5230 /**
5231  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5232  * @phba: pointer to lpfc hba data structure.
5233  *
5234  * This routine is invoked to remove all memory resources allocated
5235  * to support rpis for SLI4 ports not supporting extents. This routine
5236  * presumes the caller has released all rpis consumed by fabric or port
5237  * logins and is prepared to have the header pages removed.
5238  **/
5239 void
5240 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5241 {
5242         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5243
5244         if (!phba->sli4_hba.rpi_hdrs_in_use)
5245                 goto exit;
5246
5247         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5248                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5249                 list_del(&rpi_hdr->list);
5250                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5251                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5252                 kfree(rpi_hdr->dmabuf);
5253                 kfree(rpi_hdr);
5254         }
5255  exit:
5256         /* There are no rpis available to the port now. */
5257         phba->sli4_hba.next_rpi = 0;
5258 }
5259
5260 /**
5261  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5262  * @pdev: pointer to pci device data structure.
5263  *
5264  * This routine is invoked to allocate the driver hba data structure for an
5265  * HBA device. If the allocation is successful, the phba reference to the
5266  * PCI device data structure is set.
5267  *
5268  * Return codes
5269  *      pointer to @phba - successful
5270  *      NULL - error
5271  **/
5272 static struct lpfc_hba *
5273 lpfc_hba_alloc(struct pci_dev *pdev)
5274 {
5275         struct lpfc_hba *phba;
5276
5277         /* Allocate memory for HBA structure */
5278         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5279         if (!phba) {
5280                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5281                 return NULL;
5282         }
5283
5284         /* Set reference to PCI device in HBA structure */
5285         phba->pcidev = pdev;
5286
5287         /* Assign an unused board number */
5288         phba->brd_no = lpfc_get_instance();
5289         if (phba->brd_no < 0) {
5290                 kfree(phba);
5291                 return NULL;
5292         }
5293
5294         spin_lock_init(&phba->ct_ev_lock);
5295         INIT_LIST_HEAD(&phba->ct_ev_waiters);
5296
5297         return phba;
5298 }
5299
5300 /**
5301  * lpfc_hba_free - Free driver hba data structure with a device.
5302  * @phba: pointer to lpfc hba data structure.
5303  *
5304  * This routine is invoked to free the driver hba data structure with an
5305  * HBA device.
5306  **/
5307 static void
5308 lpfc_hba_free(struct lpfc_hba *phba)
5309 {
5310         /* Release the driver assigned board number */
5311         idr_remove(&lpfc_hba_index, phba->brd_no);
5312
5313         kfree(phba);
5314         return;
5315 }
5316
5317 /**
5318  * lpfc_create_shost - Create hba physical port with associated scsi host.
5319  * @phba: pointer to lpfc hba data structure.
5320  *
5321  * This routine is invoked to create HBA physical port and associate a SCSI
5322  * host with it.
5323  *
5324  * Return codes
5325  *      0 - successful
5326  *      other values - error
5327  **/
5328 static int
5329 lpfc_create_shost(struct lpfc_hba *phba)
5330 {
5331         struct lpfc_vport *vport;
5332         struct Scsi_Host  *shost;
5333
5334         /* Initialize HBA FC structure */
5335         phba->fc_edtov = FF_DEF_EDTOV;
5336         phba->fc_ratov = FF_DEF_RATOV;
5337         phba->fc_altov = FF_DEF_ALTOV;
5338         phba->fc_arbtov = FF_DEF_ARBTOV;
5339
5340         atomic_set(&phba->sdev_cnt, 0);
5341         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5342         if (!vport)
5343                 return -ENODEV;
5344
5345         shost = lpfc_shost_from_vport(vport);
5346         phba->pport = vport;
5347         lpfc_debugfs_initialize(vport);
5348         /* Put reference to SCSI host to driver's device private data */
5349         pci_set_drvdata(phba->pcidev, shost);
5350
5351         return 0;
5352 }
5353
5354 /**
5355  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5356  * @phba: pointer to lpfc hba data structure.
5357  *
5358  * This routine is invoked to destroy HBA physical port and the associated
5359  * SCSI host.
5360  **/
5361 static void
5362 lpfc_destroy_shost(struct lpfc_hba *phba)
5363 {
5364         struct lpfc_vport *vport = phba->pport;
5365
5366         /* Destroy the physical port associated with the SCSI host */
5367         destroy_port(vport);
5368
5369         return;
5370 }
5371
5372 /**
5373  * lpfc_setup_bg - Setup Block guard structures and debug areas.
5374  * @phba: pointer to lpfc hba data structure.
5375  * @shost: the shost to be used to detect Block guard settings.
5376  *
5377  * This routine sets up the local Block guard protocol settings for @shost.
5378  * This routine also allocates memory for debugging bg buffers.
5379  **/
5380 static void
5381 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5382 {
5383         int pagecnt = 10;
5384         if (lpfc_prot_mask && lpfc_prot_guard) {
5385                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5386                                 "1478 Registering BlockGuard with the "
5387                                 "SCSI layer\n");
5388                 scsi_host_set_prot(shost, lpfc_prot_mask);
5389                 scsi_host_set_guard(shost, lpfc_prot_guard);
5390         }
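        /*
         * Note: the second argument to __get_free_pages() is an
         * allocation order, so each attempt below requests 2^pagecnt
         * contiguous pages and retries with the next smaller order on
         * failure.
         */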
5391         if (!_dump_buf_data) {
5392                 spin_lock_init(&_dump_buf_lock);
5393                 while (pagecnt) {
5394                         _dump_buf_data =
5395                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5396                         if (_dump_buf_data) {
5397                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5398                                         "9043 BLKGRD: allocated %d pages for "
5399                                        "_dump_buf_data at 0x%p\n",
5400                                        (1 << pagecnt), _dump_buf_data);
5401                                 _dump_buf_data_order = pagecnt;
5402                                 memset(_dump_buf_data, 0,
5403                                        ((1 << PAGE_SHIFT) << pagecnt));
5404                                 break;
5405                         } else
5406                                 --pagecnt;
5407                 }
5408                 if (!_dump_buf_data_order)
5409                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5410                                 "9044 BLKGRD: ERROR unable to allocate "
5411                                "memory for hexdump\n");
5412         } else
5413                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5414                         "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5415                        "\n", _dump_buf_data);
5416         if (!_dump_buf_dif) {
5417                 while (pagecnt) {
5418                         _dump_buf_dif =
5419                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5420                         if (_dump_buf_dif) {
5421                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5422                                         "9046 BLKGRD: allocated %d pages for "
5423                                        "_dump_buf_dif at 0x%p\n",
5424                                        (1 << pagecnt), _dump_buf_dif);
5425                                 _dump_buf_dif_order = pagecnt;
5426                                 memset(_dump_buf_dif, 0,
5427                                        ((1 << PAGE_SHIFT) << pagecnt));
5428                                 break;
5429                         } else
5430                                 --pagecnt;
5431                 }
5432                 if (!_dump_buf_dif_order)
5433                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5434                                 "9047 BLKGRD: ERROR unable to allocate "
5435                                 "memory for hexdump\n");
5436         } else
5437                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5438                         "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5439                        _dump_buf_dif);
5440 }
5441
5442 /**
5443  * lpfc_post_init_setup - Perform necessary device post initialization setup.
5444  * @phba: pointer to lpfc hba data structure.
5445  *
5446  * This routine is invoked to perform all the necessary post initialization
5447  * setup for the device.
5448  **/
5449 static void
5450 lpfc_post_init_setup(struct lpfc_hba *phba)
5451 {
5452         struct Scsi_Host  *shost;
5453         struct lpfc_adapter_event_header adapter_event;
5454
5455         /* Get the default values for Model Name and Description */
5456         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5457
5458         /*
5459          * hba setup may have changed the hba_queue_depth so we need to
5460          * adjust the value of can_queue.
5461          */
5462         shost = pci_get_drvdata(phba->pcidev);
5463         shost->can_queue = phba->cfg_hba_queue_depth - 10;
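        /*
         * The margin of 10 commands held back from the SCSI midlayer is
         * a driver heuristic, presumably a reserve for internal I/O; it
         * is not mandated by the hardware queue depth itself.
         */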
5464         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5465                 lpfc_setup_bg(phba, shost);
5466
5467         lpfc_host_attrib_init(shost);
5468
5469         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5470                 spin_lock_irq(shost->host_lock);
5471                 lpfc_poll_start_timer(phba);
5472                 spin_unlock_irq(shost->host_lock);
5473         }
5474
5475         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5476                         "0428 Perform SCSI scan\n");
5477         /* Send board arrival event to upper layer */
5478         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5479         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5480         fc_host_post_vendor_event(shost, fc_get_event_number(),
5481                                   sizeof(adapter_event),
5482                                   (char *) &adapter_event,
5483                                   LPFC_NL_VENDOR_ID);
5484         return;
5485 }
5486
5487 /**
5488  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5489  * @phba: pointer to lpfc hba data structure.
5490  *
5491  * This routine is invoked to set up the PCI device memory space for device
5492  * with SLI-3 interface spec.
5493  *
5494  * Return codes
5495  *      0 - successful
5496  *      other values - error
5497  **/
5498 static int
5499 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5500 {
5501         struct pci_dev *pdev;
5502         unsigned long bar0map_len, bar2map_len;
5503         int i, hbq_count;
5504         void *ptr;
5505         int error = -ENODEV;
5506
5507         /* Obtain PCI device reference */
5508         if (!phba->pcidev)
5509                 return error;
5510         else
5511                 pdev = phba->pcidev;
5512
5513         /* Set the device DMA mask size */
5514         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5515          || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5516                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5517                  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5518                         return error;
5519                 }
5520         }
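        /*
         * The adapter is driven with 64-bit DMA addressing when the
         * platform supports it and falls back to 32-bit addressing
         * otherwise; failing both masks aborts the PCI memory setup.
         */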
5521
5522         /* Get the bus address of Bar0 and Bar2 and the number of bytes
5523          * required by each mapping.
5524          */
5525         phba->pci_bar0_map = pci_resource_start(pdev, 0);
5526         bar0map_len = pci_resource_len(pdev, 0);
5527
5528         phba->pci_bar2_map = pci_resource_start(pdev, 2);
5529         bar2map_len = pci_resource_len(pdev, 2);
5530
5531         /* Map HBA SLIM to a kernel virtual address. */
5532         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5533         if (!phba->slim_memmap_p) {
5534                 dev_printk(KERN_ERR, &pdev->dev,
5535                            "ioremap failed for SLIM memory.\n");
5536                 goto out;
5537         }
5538
5539         /* Map HBA Control Registers to a kernel virtual address. */
5540         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5541         if (!phba->ctrl_regs_memmap_p) {
5542                 dev_printk(KERN_ERR, &pdev->dev,
5543                            "ioremap failed for HBA control registers.\n");
5544                 goto out_iounmap_slim;
5545         }
5546
5547         /* Allocate memory for SLI-2 structures */
5548         phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5549                                                SLI2_SLIM_SIZE,
5550                                                &phba->slim2p.phys,
5551                                                GFP_KERNEL);
5552         if (!phba->slim2p.virt)
5553                 goto out_iounmap;
5554
5555         memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5556         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5557         phba->mbox_ext = (phba->slim2p.virt +
5558                 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5559         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5560         phba->IOCBs = (phba->slim2p.virt +
5561                        offsetof(struct lpfc_sli2_slim, IOCBs));
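        /*
         * slim2p is a single coherent allocation: the mailbox, the
         * extended mailbox words, the port control block (pcb) and the
         * IOCB area are carved out of it at the offsets given by
         * struct lpfc_sli2_slim, so one dma_free_coherent() call
         * releases all of them on teardown.
         */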
5562
5563         phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5564                                                  lpfc_sli_hbq_size(),
5565                                                  &phba->hbqslimp.phys,
5566                                                  GFP_KERNEL);
5567         if (!phba->hbqslimp.virt)
5568                 goto out_free_slim;
5569
5570         hbq_count = lpfc_sli_hbq_count();
5571         ptr = phba->hbqslimp.virt;
5572         for (i = 0; i < hbq_count; ++i) {
5573                 phba->hbqs[i].hbq_virt = ptr;
5574                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5575                 ptr += (lpfc_hbq_defs[i]->entry_count *
5576                         sizeof(struct lpfc_hbq_entry));
5577         }
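        /*
         * Each HBQ is handed a contiguous slice of the hbqslimp buffer
         * sized by its entry_count; ptr advances past one queue's
         * entries to the start of the next.
         */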
5578         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5579         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5580
5581         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5582
5583         INIT_LIST_HEAD(&phba->rb_pend_list);
5584
5585         phba->MBslimaddr = phba->slim_memmap_p;
5586         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5587         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5588         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5589         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5590
5591         return 0;
5592
5593 out_free_slim:
5594         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5595                           phba->slim2p.virt, phba->slim2p.phys);
5596 out_iounmap:
5597         iounmap(phba->ctrl_regs_memmap_p);
5598 out_iounmap_slim:
5599         iounmap(phba->slim_memmap_p);
5600 out:
5601         return error;
5602 }
5603
5604 /**
5605  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5606  * @phba: pointer to lpfc hba data structure.
5607  *
5608  * This routine is invoked to unset the PCI device memory space for device
5609  * with SLI-3 interface spec.
5610  **/
5611 static void
5612 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5613 {
5614         struct pci_dev *pdev;
5615
5616         /* Obtain PCI device reference */
5617         if (!phba->pcidev)
5618                 return;
5619         else
5620                 pdev = phba->pcidev;
5621
5622         /* Free coherent DMA memory allocated */
5623         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5624                           phba->hbqslimp.virt, phba->hbqslimp.phys);
5625         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5626                           phba->slim2p.virt, phba->slim2p.phys);
5627
5628         /* I/O memory unmap */
5629         iounmap(phba->ctrl_regs_memmap_p);
5630         iounmap(phba->slim_memmap_p);
5631
5632         return;
5633 }
5634
5635 /**
5636  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5637  * @phba: pointer to lpfc hba data structure.
5638  *
5639  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5640  * done and check status.
5641  *
5642  * Return 0 if successful, otherwise -ENODEV.
5643  **/
5644 int
5645 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5646 {
5647         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5648         struct lpfc_register reg_data;
5649         int i, port_error = 0;
5650         uint32_t if_type;
5651
5652         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5653         memset(&reg_data, 0, sizeof(reg_data));
5654         if (!phba->sli4_hba.PSMPHRregaddr)
5655                 return -ENODEV;
5656
5657         /* Wait up to 30 seconds for the SLI Port POST done and ready */
5658         for (i = 0; i < 3000; i++) {
5659                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5660                         &portsmphr_reg.word0) ||
5661                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5662                         /* Port has a fatal POST error, break out */
5663                         port_error = -ENODEV;
5664                         break;
5665                 }
5666                 if (LPFC_POST_STAGE_PORT_READY ==
5667                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5668                         break;
5669                 msleep(10);
5670         }
5671
5672         /*
5673          * If there was a port error during POST, then don't proceed with
5674          * other register reads as the data may not be valid.  Just exit.
5675          */
5676         if (port_error) {
5677                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5678                         "1408 Port Failed POST - portsmphr=0x%x, "
5679                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5680                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5681                         portsmphr_reg.word0,
5682                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5683                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5684                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5685                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5686                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5687                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5688                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5689                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5690         } else {
5691                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5692                                 "2534 Device Info: SLIFamily=0x%x, "
5693                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5694                                 "SLIHint_2=0x%x, FT=0x%x\n",
5695                                 bf_get(lpfc_sli_intf_sli_family,
5696                                        &phba->sli4_hba.sli_intf),
5697                                 bf_get(lpfc_sli_intf_slirev,
5698                                        &phba->sli4_hba.sli_intf),
5699                                 bf_get(lpfc_sli_intf_if_type,
5700                                        &phba->sli4_hba.sli_intf),
5701                                 bf_get(lpfc_sli_intf_sli_hint1,
5702                                        &phba->sli4_hba.sli_intf),
5703                                 bf_get(lpfc_sli_intf_sli_hint2,
5704                                        &phba->sli4_hba.sli_intf),
5705                                 bf_get(lpfc_sli_intf_func_type,
5706                                        &phba->sli4_hba.sli_intf));
5707                 /*
5708                  * Check for other Port errors during the initialization
5709                  * process.  Fail the load if the port did not come up
5710                  * correctly.
5711                  */
5712                 if_type = bf_get(lpfc_sli_intf_if_type,
5713                                  &phba->sli4_hba.sli_intf);
5714                 switch (if_type) {
5715                 case LPFC_SLI_INTF_IF_TYPE_0:
5716                         phba->sli4_hba.ue_mask_lo =
5717                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5718                         phba->sli4_hba.ue_mask_hi =
5719                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5720                         uerrlo_reg.word0 =
5721                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5722                         uerrhi_reg.word0 =
5723                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5724                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5725                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5726                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5727                                                 "1422 Unrecoverable Error "
5728                                                 "Detected during POST "
5729                                                 "uerr_lo_reg=0x%x, "
5730                                                 "uerr_hi_reg=0x%x, "
5731                                                 "ue_mask_lo_reg=0x%x, "
5732                                                 "ue_mask_hi_reg=0x%x\n",
5733                                                 uerrlo_reg.word0,
5734                                                 uerrhi_reg.word0,
5735                                                 phba->sli4_hba.ue_mask_lo,
5736                                                 phba->sli4_hba.ue_mask_hi);
5737                                 port_error = -ENODEV;
5738                         }
5739                         break;
5740                 case LPFC_SLI_INTF_IF_TYPE_2:
5741                         /* Final checks.  The port status should be clean. */
5742                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5743                                 &reg_data.word0) ||
5744                                 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5745                                  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5746                                 phba->work_status[0] =
5747                                         readl(phba->sli4_hba.u.if_type2.
5748                                               ERR1regaddr);
5749                                 phba->work_status[1] =
5750                                         readl(phba->sli4_hba.u.if_type2.
5751                                               ERR2regaddr);
5752                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5753                                         "2888 Port Error Detected "
5754                                         "during POST: "
5755                                         "port status reg 0x%x, "
5756                                         "port_smphr reg 0x%x, "
5757                                         "error 1=0x%x, error 2=0x%x\n",
5758                                         reg_data.word0,
5759                                         portsmphr_reg.word0,
5760                                         phba->work_status[0],
5761                                         phba->work_status[1]);
5762                                 port_error = -ENODEV;
5763                         }
5764                         break;
5765                 case LPFC_SLI_INTF_IF_TYPE_1:
5766                 default:
5767                         break;
5768                 }
5769         }
5770         return port_error;
5771 }
5772
5773 /**
5774  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5775  * @phba: pointer to lpfc hba data structure.
5776  * @if_type:  The SLI4 interface type getting configured.
5777  *
5778  * This routine is invoked to set up SLI4 BAR0 PCI config space register
5779  * memory map.
5780  **/
5781 static void
5782 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5783 {
5784         switch (if_type) {
5785         case LPFC_SLI_INTF_IF_TYPE_0:
5786                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5787                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5788                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5789                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5790                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5791                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5792                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5793                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5794                 phba->sli4_hba.SLIINTFregaddr =
5795                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5796                 break;
5797         case LPFC_SLI_INTF_IF_TYPE_2:
5798                 phba->sli4_hba.u.if_type2.ERR1regaddr =
5799                         phba->sli4_hba.conf_regs_memmap_p +
5800                                                 LPFC_CTL_PORT_ER1_OFFSET;
5801                 phba->sli4_hba.u.if_type2.ERR2regaddr =
5802                         phba->sli4_hba.conf_regs_memmap_p +
5803                                                 LPFC_CTL_PORT_ER2_OFFSET;
5804                 phba->sli4_hba.u.if_type2.CTRLregaddr =
5805                         phba->sli4_hba.conf_regs_memmap_p +
5806                                                 LPFC_CTL_PORT_CTL_OFFSET;
5807                 phba->sli4_hba.u.if_type2.STATUSregaddr =
5808                         phba->sli4_hba.conf_regs_memmap_p +
5809                                                 LPFC_CTL_PORT_STA_OFFSET;
5810                 phba->sli4_hba.SLIINTFregaddr =
5811                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5812                 phba->sli4_hba.PSMPHRregaddr =
5813                         phba->sli4_hba.conf_regs_memmap_p +
5814                                                 LPFC_CTL_PORT_SEM_OFFSET;
5815                 phba->sli4_hba.RQDBregaddr =
5816                         phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5817                 phba->sli4_hba.WQDBregaddr =
5818                         phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5819                 phba->sli4_hba.EQCQDBregaddr =
5820                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5821                 phba->sli4_hba.MQDBregaddr =
5822                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5823                 phba->sli4_hba.BMBXregaddr =
5824                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5825                 break;
5826         case LPFC_SLI_INTF_IF_TYPE_1:
5827         default:
5828                 dev_printk(KERN_ERR, &phba->pcidev->dev,
5829                            "FATAL - unsupported SLI4 interface type - %d\n",
5830                            if_type);
5831                 break;
5832         }
5833 }
5834
5835 /**
5836  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5837  * @phba: pointer to lpfc hba data structure.
5838  *
5839  * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5840  * memory map.
5841  **/
5842 static void
5843 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5844 {
5845         phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5846                 LPFC_SLIPORT_IF0_SMPHR;
5847         phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5848                 LPFC_HST_ISR0;
5849         phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5850                 LPFC_HST_IMR0;
5851         phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5852                 LPFC_HST_ISCR0;
5853 }
5854
5855 /**
5856  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5857  * @phba: pointer to lpfc hba data structure.
5858  * @vf: virtual function number
5859  *
5860  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5861  * based on the given virtual function number, @vf.
5862  *
5863  * Return 0 if successful, otherwise -ENODEV.
5864  **/
5865 static int
5866 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5867 {
5868         if (vf > LPFC_VIR_FUNC_MAX)
5869                 return -ENODEV;
5870
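        /*
         * Each virtual function owns one LPFC_VFR_PAGE_SIZE page of
         * doorbell registers within BAR2.  For example, vf 2's work
         * queue doorbell resolves to:
         *   drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL
         */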
5871         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5872                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5873         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5874                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5875         phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5876                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5877         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5878                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5879         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5880                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5881         return 0;
5882 }
5883
5884 /**
5885  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5886  * @phba: pointer to lpfc hba data structure.
5887  *
5888  * This routine is invoked to create the bootstrap mailbox
5889  * region consistent with the SLI-4 interface spec.  This
5890  * routine allocates all memory necessary to communicate
5891  * mailbox commands to the port and sets up all alignment
5892  * needs.  No locks are expected to be held when calling
5893  * this routine.
5894  *
5895  * Return codes
5896  *      0 - successful
5897  *      -ENOMEM - could not allocate memory.
5898  **/
5899 static int
5900 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5901 {
5902         uint32_t bmbx_size;
5903         struct lpfc_dmabuf *dmabuf;
5904         struct dma_address *dma_address;
5905         uint32_t pa_addr;
5906         uint64_t phys_addr;
5907
5908         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5909         if (!dmabuf)
5910                 return -ENOMEM;
5911
5912         /*
5913          * The bootstrap mailbox region consists of two parts
5914          * plus an alignment restriction of 16 bytes.
5915          */
5916         bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
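        /*
         * Over-allocating by (LPFC_ALIGN_16_BYTE - 1) bytes guarantees
         * that a 16-byte-aligned region large enough for the bootstrap
         * mailbox fits in the buffer no matter where dma_alloc_coherent()
         * places it; PTR_ALIGN()/ALIGN() below pick out that region.
         */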
5917         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5918                                           bmbx_size,
5919                                           &dmabuf->phys,
5920                                           GFP_KERNEL);
5921         if (!dmabuf->virt) {
5922                 kfree(dmabuf);
5923                 return -ENOMEM;
5924         }
5925         memset(dmabuf->virt, 0, bmbx_size);
5926
5927         /*
5928          * Initialize the bootstrap mailbox pointers now so that the register
5929          * operations are simple later.  The mailbox dma address is required
5930          * to be 16-byte aligned.  Also align the virtual memory as each
5931          * mailbox is copied into the bmbx mailbox region before issuing the
5932          * command to the port.
5933          */
5934         phba->sli4_hba.bmbx.dmabuf = dmabuf;
5935         phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5936
5937         phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5938                                               LPFC_ALIGN_16_BYTE);
5939         phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5940                                               LPFC_ALIGN_16_BYTE);
5941
5942         /*
5943          * Set the high and low physical addresses now.  The SLI4 alignment
5944          * requirement is 16 bytes and the mailbox is posted to the port
5945          * as two 30-bit addresses.  The other data is a bit marking whether
5946          * the 30-bit address is the high or low address.
5947          * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5948          * cleanly on 32-bit machines.
5949          */
5950         dma_address = &phba->sli4_hba.bmbx.dma_address;
5951         phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5952         pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5953         dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5954                                            LPFC_BMBX_BIT1_ADDR_HI);
5955
5956         pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5957         dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5958                                            LPFC_BMBX_BIT1_ADDR_LO);
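        /*
         * Worked example (address assumed for illustration): for
         * aphys = 0x234567890 (16-byte aligned), addr_lo carries bits
         * 33:4, (0x234567890 >> 4) & 0x3fffffff = 0x23456789, and
         * addr_hi carries bits 63:34, (0x234567890 >> 34) & 0x3fffffff
         * = 0; each half is shifted left by 2 and tagged with its
         * LPFC_BMBX_BIT1_ADDR_* marker.
         */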
5959         return 0;
5960 }
5961
5962 /**
5963  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5964  * @phba: pointer to lpfc hba data structure.
5965  *
5966  * This routine is invoked to teardown the bootstrap mailbox
5967  * region and release all host resources. This routine requires
5968  * the caller to ensure all mailbox commands have been recovered, no
5969  * additional mailbox commands are sent, and interrupts are disabled
5970  * before calling this routine.
5971  *
5972  **/
5973 static void
5974 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5975 {
5976         dma_free_coherent(&phba->pcidev->dev,
5977                           phba->sli4_hba.bmbx.bmbx_size,
5978                           phba->sli4_hba.bmbx.dmabuf->virt,
5979                           phba->sli4_hba.bmbx.dmabuf->phys);
5980
5981         kfree(phba->sli4_hba.bmbx.dmabuf);
5982         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5983 }
5984
5985 /**
5986  * lpfc_sli4_read_config - Get the config parameters.
5987  * @phba: pointer to lpfc hba data structure.
5988  *
5989  * This routine is invoked to read the configuration parameters from the HBA.
5990  * The configuration parameters are used to set the base and maximum values
5991  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5992  * allocation for the port.
5993  *
5994  * Return codes
5995  *      0 - successful
5996  *      -ENOMEM - No available memory
5997  *      -EIO - The mailbox failed to complete successfully.
5998  **/
5999 int
6000 lpfc_sli4_read_config(struct lpfc_hba *phba)
6001 {
6002         LPFC_MBOXQ_t *pmb;
6003         struct lpfc_mbx_read_config *rd_config;
6004         union  lpfc_sli4_cfg_shdr *shdr;
6005         uint32_t shdr_status, shdr_add_status;
6006         struct lpfc_mbx_get_func_cfg *get_func_cfg;
6007         struct lpfc_rsrc_desc_fcfcoe *desc;
6008         uint32_t desc_count;
6009         int length, i, rc = 0;
6010
6011         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6012         if (!pmb) {
6013                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6014                                 "2011 Unable to allocate memory for issuing "
6015                                 "SLI_CONFIG_SPECIAL mailbox command\n");
6016                 return -ENOMEM;
6017         }
6018
6019         lpfc_read_config(phba, pmb);
6020
6021         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6022         if (rc != MBX_SUCCESS) {
6023                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6024                         "2012 Mailbox failed, mbxCmd x%x "
6025                         "READ_CONFIG, mbxStatus x%x\n",
6026                         bf_get(lpfc_mqe_command, &pmb->u.mqe),
6027                         bf_get(lpfc_mqe_status, &pmb->u.mqe));
6028                 rc = -EIO;
6029         } else {
6030                 rd_config = &pmb->u.mqe.un.rd_config;
6031                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6032                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6033                         phba->sli4_hba.lnk_info.lnk_tp =
6034                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6035                         phba->sli4_hba.lnk_info.lnk_no =
6036                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6037                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6038                                         "3081 lnk_type:%d, lnk_numb:%d\n",
6039                                         phba->sli4_hba.lnk_info.lnk_tp,
6040                                         phba->sli4_hba.lnk_info.lnk_no);
6041                 } else
6042                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6043                                         "3082 Mailbox (x%x) returned ldv:x0\n",
6044                                         bf_get(lpfc_mqe_command, &pmb->u.mqe));
6045                 phba->sli4_hba.extents_in_use =
6046                         bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6047                 phba->sli4_hba.max_cfg_param.max_xri =
6048                         bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6049                 phba->sli4_hba.max_cfg_param.xri_base =
6050                         bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6051                 phba->sli4_hba.max_cfg_param.max_vpi =
6052                         bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6053                 phba->sli4_hba.max_cfg_param.vpi_base =
6054                         bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6055                 phba->sli4_hba.max_cfg_param.max_rpi =
6056                         bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6057                 phba->sli4_hba.max_cfg_param.rpi_base =
6058                         bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6059                 phba->sli4_hba.max_cfg_param.max_vfi =
6060                         bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6061                 phba->sli4_hba.max_cfg_param.vfi_base =
6062                         bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6063                 phba->sli4_hba.max_cfg_param.max_fcfi =
6064                         bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6065                 phba->sli4_hba.max_cfg_param.max_eq =
6066                         bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6067                 phba->sli4_hba.max_cfg_param.max_rq =
6068                         bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6069                 phba->sli4_hba.max_cfg_param.max_wq =
6070                         bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6071                 phba->sli4_hba.max_cfg_param.max_cq =
6072                         bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6073                 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6074                 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6075                 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6076                 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6077                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
6078                 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6079                                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6080                 phba->max_vports = phba->max_vpi;
6081                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6082                                 "2003 cfg params Extents? %d "
6083                                 "XRI(B:%d M:%d), "
6084                                 "VPI(B:%d M:%d) "
6085                                 "VFI(B:%d M:%d) "
6086                                 "RPI(B:%d M:%d) "
6087                                 "FCFI(Count:%d)\n",
6088                                 phba->sli4_hba.extents_in_use,
6089                                 phba->sli4_hba.max_cfg_param.xri_base,
6090                                 phba->sli4_hba.max_cfg_param.max_xri,
6091                                 phba->sli4_hba.max_cfg_param.vpi_base,
6092                                 phba->sli4_hba.max_cfg_param.max_vpi,
6093                                 phba->sli4_hba.max_cfg_param.vfi_base,
6094                                 phba->sli4_hba.max_cfg_param.max_vfi,
6095                                 phba->sli4_hba.max_cfg_param.rpi_base,
6096                                 phba->sli4_hba.max_cfg_param.max_rpi,
6097                                 phba->sli4_hba.max_cfg_param.max_fcfi);
6098         }
6099
6100         if (rc)
6101                 goto read_cfg_out;
6102
6103         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
6104         if (phba->cfg_hba_queue_depth >
6105                 (phba->sli4_hba.max_cfg_param.max_xri -
6106                         lpfc_sli4_get_els_iocb_cnt(phba)))
6107                 phba->cfg_hba_queue_depth =
6108                         phba->sli4_hba.max_cfg_param.max_xri -
6109                                 lpfc_sli4_get_els_iocb_cnt(phba);
6110
6111         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6112             LPFC_SLI_INTF_IF_TYPE_2)
6113                 goto read_cfg_out;
6114
6115         /* get the pf# and vf# for SLI4 if_type 2 port */
6116         length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6117                   sizeof(struct lpfc_sli4_cfg_mhdr));
6118         lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6119                          LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6120                          length, LPFC_SLI4_MBX_EMBED);
6121
6122         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6123         shdr = (union lpfc_sli4_cfg_shdr *)
6124                                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6125         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6126         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6127         if (rc || shdr_status || shdr_add_status) {
6128                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6129                                 "3026 Mailbox failed, mbxCmd x%x "
6130                                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6131                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6132                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6133                 rc = -EIO;
6134                 goto read_cfg_out;
6135         }
6136
6137         /* search for the fc_fcoe resource descriptor */
6138         get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6139         desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6140
6141         for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6142                 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6143                         &get_func_cfg->func_cfg.desc[i];
6144                 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6145                     bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6146                         phba->sli4_hba.iov.pf_number =
6147                                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6148                         phba->sli4_hba.iov.vf_number =
6149                                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6150                         break;
6151                 }
6152         }
6153
6154         if (i < LPFC_RSRC_DESC_MAX_NUM)
6155                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6156                                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6157                                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6158                                 phba->sli4_hba.iov.vf_number);
6159         else {
6160                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6161                                 "3028 GET_FUNCTION_CONFIG: failed to find "
6162                                 "Resource Descriptor:x%x\n",
6163                                 LPFC_RSRC_DESC_TYPE_FCFCOE);
6164                 rc = -EIO;
6165         }
6166
6167 read_cfg_out:
6168         mempool_free(pmb, phba->mbox_mem_pool);
6169         return rc;
6170 }
6171
6172 /**
6173  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6174  * @phba: pointer to lpfc hba data structure.
6175  *
6176  * This routine is invoked to setup the port-side endian order when
6177  * the port if_type is 0.  This routine has no function for other
6178  * if_types.
6179  *
6180  * Return codes
6181  *      0 - successful
6182  *      -ENOMEM - No available memory
6183  *      -EIO - The mailbox failed to complete successfully.
6184  **/
6185 static int
6186 lpfc_setup_endian_order(struct lpfc_hba *phba)
6187 {
6188         LPFC_MBOXQ_t *mboxq;
6189         uint32_t if_type, rc = 0;
6190         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6191                                       HOST_ENDIAN_HIGH_WORD1};
6192
6193         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6194         switch (if_type) {
6195         case LPFC_SLI_INTF_IF_TYPE_0:
6196                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6197                                                        GFP_KERNEL);
6198                 if (!mboxq) {
6199                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6200                                         "0492 Unable to allocate memory for "
6201                                         "issuing SLI_CONFIG_SPECIAL mailbox "
6202                                         "command\n");
6203                         return -ENOMEM;
6204                 }
6205
6206                 /*
6207                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6208                  * two words to contain special data values and no other data.
6209                  */
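                /*
                 * Presumably the port compares the byte pattern it
                 * receives in these two words against the expected
                 * constants to discover the host's byte order (inferred
                 * from the HOST_ENDIAN_* names, not from a spec).
                 */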
6210                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6211                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6212                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6213                 if (rc != MBX_SUCCESS) {
6214                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6215                                         "0493 SLI_CONFIG_SPECIAL mailbox "
6216                                         "failed with status x%x\n",
6217                                         rc);
6218                         rc = -EIO;
6219                 }
6220                 mempool_free(mboxq, phba->mbox_mem_pool);
6221                 break;
6222         case LPFC_SLI_INTF_IF_TYPE_2:
6223         case LPFC_SLI_INTF_IF_TYPE_1:
6224         default:
6225                 break;
6226         }
6227         return rc;
6228 }
6229
6230 /**
6231  * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6232  * @phba: pointer to lpfc hba data structure.
6233  *
6234  * This routine is invoked to check the user-settable queue counts for EQs and
6235  * CQs. After this routine is called, the counts will be set to valid values that
6236  * adhere to the constraints of the system's interrupt vectors and the port's
6237  * queue resources.
6238  *
6239  * Return codes
6240  *      0 - successful
6241  *      -ENOMEM - No available memory
6242  **/
6243 static int
6244 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6245 {
6246         int cfg_fcp_wq_count;
6247         int cfg_fcp_eq_count;
6248
6249         /*
6250          * Sanity check for configured queue parameters against the run-time
6251          * device parameters
6252          */
6253
6254         /* Sanity check on FCP fast-path WQ parameters */
6255         cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6256         if (cfg_fcp_wq_count >
6257             (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6258                 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6259                                    LPFC_SP_WQN_DEF;
6260                 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6261                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6262                                         "2581 Not enough WQs (%d) from "
6263                                         "the pci function for supporting "
6264                                         "FCP WQs (%d)\n",
6265                                         phba->sli4_hba.max_cfg_param.max_wq,
6266                                         phba->cfg_fcp_wq_count);
6267                         goto out_error;
6268                 }
6269                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6270                                 "2582 Not enough WQs (%d) from the pci "
6271                                 "function for supporting the requested "
6272                                 "FCP WQs (%d), the actual FCP WQs can "
6273                                 "be supported: %d\n",
6274                                 phba->sli4_hba.max_cfg_param.max_wq,
6275                                 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6276         }
6277         /* The actual number of FCP work queues adopted */
6278         phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6279
6280         /* Sanity check on FCP fast-path EQ parameters */
6281         cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6282         if (cfg_fcp_eq_count >
6283             (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6284                 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6285                                    LPFC_SP_EQN_DEF;
6286                 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6287                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6288                                         "2574 Not enough EQs (%d) from the "
6289                                         "pci function for supporting FCP "
6290                                         "EQs (%d)\n",
6291                                         phba->sli4_hba.max_cfg_param.max_eq,
6292                                         phba->cfg_fcp_eq_count);
6293                         goto out_error;
6294                 }
6295                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6296                                 "2575 Not enough EQs (%d) from the pci "
6297                                 "function for supporting the requested "
6298                                 "FCP EQs (%d), the actual FCP EQs can "
6299                                 "be supported: %d\n",
6300                                 phba->sli4_hba.max_cfg_param.max_eq,
6301                                 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6302         }
6303         /* It does not make sense to have more EQs than WQs */
6304         if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6305                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6306                                 "2593 The FCP EQ count(%d) cannot be greater "
6307                                 "than the FCP WQ count(%d), limiting the "
6308                                 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6309                                 phba->cfg_fcp_wq_count,
6310                                 phba->cfg_fcp_wq_count);
6311                 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6312         }
6313         /* The actual number of FCP event queues adopted */
6314         phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6315         /* The overall number of event queues used */
6316         phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
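        /*
         * Example (assuming LPFC_SP_EQN_DEF is 1): with
         * cfg_fcp_eq_count = 4, cfg_eqn becomes 5, i.e. four fast-path
         * EQs plus one slow-path EQ.
         */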
6317
6318         /* Get EQ depth from module parameter, fake the default for now */
6319         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6320         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6321
6322         /* Get CQ depth from module parameter, fake the default for now */
6323         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6324         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6325
6326         return 0;
6327 out_error:
6328         return -ENOMEM;
6329 }
6330
6331 /**
6332  * lpfc_sli4_queue_create - Create all the SLI4 queues
6333  * @phba: pointer to lpfc hba data structure.
6334  *
6335  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6336  * operation. For each SLI4 queue type, the parameters such as queue entry
6337  * count (queue depth) shall be taken from the module parameter. For now,
6338  * we just use some constant number as place holder.
6339  * we just use some constant number as a placeholder.
6340  * Return codes
6341  *      0 - successful
6342  *      -ENOMEM - No available memory
6343  *      -EIO - The mailbox failed to complete successfully.
6344  **/
6345 int
6346 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6347 {
6348         struct lpfc_queue *qdesc;
6349         int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6350
6351         /*
6352          * Create Event Queues (EQs)
6353          */
6354
6355         /* Create slow path event queue */
6356         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6357                                       phba->sli4_hba.eq_ecount);
6358         if (!qdesc) {
6359                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6360                                 "0496 Failed allocate slow-path EQ\n");
6361                 goto out_error;
6362         }
6363         phba->sli4_hba.sp_eq = qdesc;
6364
6365         /*
6366          * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
6367          * zero whenever there is exactly one interrupt vector.  This is not
6368          * an error.
6369          */
6370         if (phba->cfg_fcp_eq_count) {
6371                 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6372                                        phba->cfg_fcp_eq_count), GFP_KERNEL);
6373                 if (!phba->sli4_hba.fp_eq) {
6374                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6375                                         "2576 Failed allocate memory for "
6376                                         "fast-path EQ record array\n");
6377                         goto out_free_sp_eq;
6378                 }
6379         }
6380         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6381                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6382                                               phba->sli4_hba.eq_ecount);
6383                 if (!qdesc) {
6384                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6385                                         "0497 Failed allocate fast-path EQ\n");
6386                         goto out_free_fp_eq;
6387                 }
6388                 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6389         }
6390
6391         /*
6392          * Create Completion Queues (CQs)
6393          */
6394
6395         /* Create slow-path Mailbox Command Completion Queue */
6396         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6397                                       phba->sli4_hba.cq_ecount);
6398         if (!qdesc) {
6399                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6400                                 "0500 Failed allocate slow-path mailbox CQ\n");
6401                 goto out_free_fp_eq;
6402         }
6403         phba->sli4_hba.mbx_cq = qdesc;
6404
6405         /* Create slow-path ELS Complete Queue */
6406         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6407                                       phba->sli4_hba.cq_ecount);
6408         if (!qdesc) {
6409                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6410                                 "0501 Failed allocate slow-path ELS CQ\n");
6411                 goto out_free_mbx_cq;
6412         }
6413         phba->sli4_hba.els_cq = qdesc;
6414
6416         /*
6417          * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
6418          * If there are no FCP EQs then create exactly one FCP CQ.
6419          */
6420         if (phba->cfg_fcp_eq_count)
6421                 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6422                                                  phba->cfg_fcp_eq_count),
6423                                                 GFP_KERNEL);
6424         else
6425                 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6426                                                 GFP_KERNEL);
6427         if (!phba->sli4_hba.fcp_cq) {
6428                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6429                                 "2577 Failed allocate memory for fast-path "
6430                                 "CQ record array\n");
6431                 goto out_free_els_cq;
6432         }
6433         fcp_cqidx = 0;
6434         do {
6435                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6436                                               phba->sli4_hba.cq_ecount);
6437                 if (!qdesc) {
6438                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6439                                         "0499 Failed allocate fast-path FCP "
6440                                         "CQ (%d)\n", fcp_cqidx);
6441                         goto out_free_fcp_cq;
6442                 }
6443                 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6444         } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6445
6446         /* Create Mailbox Command Queue */
6447         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6448         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6449
6450         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6451                                       phba->sli4_hba.mq_ecount);
6452         if (!qdesc) {
6453                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6454                                 "0505 Failed allocate slow-path MQ\n");
6455                 goto out_free_fcp_cq;
6456         }
6457         phba->sli4_hba.mbx_wq = qdesc;
6458
6459         /*
6460          * Create all the Work Queues (WQs)
6461          */
6462         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6463         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6464
6465         /* Create slow-path ELS Work Queue */
6466         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6467                                       phba->sli4_hba.wq_ecount);
6468         if (!qdesc) {
6469                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6470                                 "0504 Failed allocate slow-path ELS WQ\n");
6471                 goto out_free_mbx_wq;
6472         }
6473         phba->sli4_hba.els_wq = qdesc;
6474
6475         /* Create fast-path FCP Work Queue(s) */
6476         phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6477                                 phba->cfg_fcp_wq_count), GFP_KERNEL);
6478         if (!phba->sli4_hba.fcp_wq) {
6479                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6480                                 "2578 Failed allocate memory for fast-path "
6481                                 "WQ record array\n");
6482                 goto out_free_els_wq;
6483         }
6484         for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6485                 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6486                                               phba->sli4_hba.wq_ecount);
6487                 if (!qdesc) {
6488                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6489                                         "0503 Failed allocate fast-path FCP "
6490                                         "WQ (%d)\n", fcp_wqidx);
6491                         goto out_free_fcp_wq;
6492                 }
6493                 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6494         }
6495
6496         /*
6497          * Create Receive Queue (RQ)
6498          */
6499         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6500         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6501
6502         /* Create Receive Queue for header */
6503         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6504                                       phba->sli4_hba.rq_ecount);
6505         if (!qdesc) {
6506                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6507                                 "0506 Failed allocate receive HRQ\n");
6508                 goto out_free_fcp_wq;
6509         }
6510         phba->sli4_hba.hdr_rq = qdesc;
6511
6512         /* Create Receive Queue for data */
6513         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6514                                       phba->sli4_hba.rq_ecount);
6515         if (!qdesc) {
6516                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6517                                 "0507 Failed allocate receive DRQ\n");
6518                 goto out_free_hdr_rq;
6519         }
6520         phba->sli4_hba.dat_rq = qdesc;
6521
6522         return 0;
6523
6524 out_free_hdr_rq:
6525         lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6526         phba->sli4_hba.hdr_rq = NULL;
6527 out_free_fcp_wq:
6528         for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6529                 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6530                 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6531         }
6532         kfree(phba->sli4_hba.fcp_wq);
6533         phba->sli4_hba.fcp_wq = NULL;
6534 out_free_els_wq:
6535         lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6536         phba->sli4_hba.els_wq = NULL;
6537 out_free_mbx_wq:
6538         lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6539         phba->sli4_hba.mbx_wq = NULL;
6540 out_free_fcp_cq:
6541         for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6542                 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6543                 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6544         }
6545         kfree(phba->sli4_hba.fcp_cq);
6546         phba->sli4_hba.fcp_cq = NULL;
6547 out_free_els_cq:
6548         lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6549         phba->sli4_hba.els_cq = NULL;
6550 out_free_mbx_cq:
6551         lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6552         phba->sli4_hba.mbx_cq = NULL;
6553 out_free_fp_eq:
6554         for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6555                 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6556                 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6557         }
6558         kfree(phba->sli4_hba.fp_eq);
6559         phba->sli4_hba.fp_eq = NULL;
6560 out_free_sp_eq:
6561         lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6562         phba->sli4_hba.sp_eq = NULL;
6563 out_error:
6564         return -ENOMEM;
6565 }
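
/*
 * The error unwinding above follows the kernel's stacked "goto" idiom:
 * each allocation gets its own label, and a failure jumps to the label
 * that releases everything allocated so far, in reverse order.  The
 * helper below is a minimal, compiled-out sketch of the same pattern;
 * it is illustrative only and not part of the driver.
 */
#if 0
static int example_alloc_queue_pair(struct lpfc_hba *phba,
				    struct lpfc_queue **eq,
				    struct lpfc_queue **cq)
{
	*eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				    phba->sli4_hba.eq_ecount);
	if (!*eq)
		goto out_error;
	*cq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				    phba->sli4_hba.cq_ecount);
	if (!*cq)
		goto out_free_eq;
	return 0;

out_free_eq:
	/* undo the first allocation before reporting failure */
	lpfc_sli4_queue_free(*eq);
	*eq = NULL;
out_error:
	return -ENOMEM;
}
#endif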
6566
6567 /**
6568  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6569  * @phba: pointer to lpfc hba data structure.
6570  *
6571  * This routine is invoked to release (free) all the SLI4 queues that were
6572  * allocated for the FCoE HBA operation.  The routine returns no value.
6573  **/
6579 void
6580 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6581 {
6582         int fcp_qidx;
6583
6584         /* Release mailbox command work queue */
6585         lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6586         phba->sli4_hba.mbx_wq = NULL;
6587
6588         /* Release ELS work queue */
6589         lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6590         phba->sli4_hba.els_wq = NULL;
6591
6592         /* Release FCP work queue */
6593         if (phba->sli4_hba.fcp_wq != NULL)
6594                 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6595                      fcp_qidx++)
6596                         lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6597         kfree(phba->sli4_hba.fcp_wq);
6598         phba->sli4_hba.fcp_wq = NULL;
6599
6600         /* Release unsolicited receive queue */
6601         lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6602         phba->sli4_hba.hdr_rq = NULL;
6603         lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6604         phba->sli4_hba.dat_rq = NULL;
6605
6606         /* Release ELS complete queue */
6607         lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6608         phba->sli4_hba.els_cq = NULL;
6609
6610         /* Release mailbox command complete queue */
6611         lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6612         phba->sli4_hba.mbx_cq = NULL;
6613
6614         /* Release FCP response complete queue */
6615         fcp_qidx = 0;
6616         if (phba->sli4_hba.fcp_cq != NULL)
6617                 do {
6618                         lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6619                 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
6620         kfree(phba->sli4_hba.fcp_cq);
6621         phba->sli4_hba.fcp_cq = NULL;
6622
6623         /* Release fast-path event queue */
6624         if (phba->sli4_hba.fp_eq != NULL)
6625                 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6626                      fcp_qidx++)
6627                         lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6628         kfree(phba->sli4_hba.fp_eq);
6629         phba->sli4_hba.fp_eq = NULL;
6630
6631         /* Release slow-path event queue */
6632         lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6633         phba->sli4_hba.sp_eq = NULL;
6634
6635         return;
6636 }
6637
6638 /**
6639  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6640  * @phba: pointer to lpfc hba data structure.
6641  *
6642  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6643  * operation.
6644  *
6645  * Return codes
6646  *      0 - successful
6647  *      -ENOMEM - No available memory
6648  *      -EIO - The mailbox failed to complete successfully.
6649  **/
6650 int
6651 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6652 {
6653         int rc = -ENOMEM;
6654         int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6655         int fcp_cq_index = 0;
6656
6657         /*
6658          * Set up Event Queues (EQs)
6659          */
6660
6661         /* Set up slow-path event queue */
6662         if (!phba->sli4_hba.sp_eq) {
6663                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6664                                 "0520 Slow-path EQ not allocated\n");
6665                 goto out_error;
6666         }
6667         rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6668                             LPFC_SP_DEF_IMAX);
6669         if (rc) {
6670                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6671                                 "0521 Failed setup of slow-path EQ: "
6672                                 "rc = 0x%x\n", rc);
6673                 goto out_error;
6674         }
6675         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6676                         "2583 Slow-path EQ setup: queue-id=%d\n",
6677                         phba->sli4_hba.sp_eq->queue_id);
6678
6679         /* Set up fast-path event queue */
6680         if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
6681                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6682                                 "3147 Fast-path EQs not allocated\n");
6683                 rc = -ENOMEM;
6684                 goto out_destroy_sp_eq;
6685         }
6686         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6687                 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6688                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6689                                         "0522 Fast-path EQ (%d) not "
6690                                         "allocated\n", fcp_eqidx);
6691                         rc = -ENOMEM;
6692                         goto out_destroy_fp_eq;
6693                 }
6694                 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6695                                     phba->cfg_fcp_imax);
6696                 if (rc) {
6697                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6698                                         "0523 Failed setup of fast-path EQ "
6699                                         "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6700                         goto out_destroy_fp_eq;
6701                 }
6702                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6703                                 "2584 Fast-path EQ setup: "
6704                                 "queue[%d]-id=%d\n", fcp_eqidx,
6705                                 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6706         }
6707
6708         /*
6709          * Set up Complete Queues (CQs)
6710          */
6711
6712         /* Set up slow-path MBOX Complete Queue as the first CQ */
6713         if (!phba->sli4_hba.mbx_cq) {
6714                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6715                                 "0528 Mailbox CQ not allocated\n");
6716                 rc = -ENOMEM;
6717                 goto out_destroy_fp_eq;
6718         }
6719         rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6720                             LPFC_MCQ, LPFC_MBOX);
6721         if (rc) {
6722                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723                                 "0529 Failed setup of slow-path mailbox CQ: "
6724                                 "rc = 0x%x\n", rc);
6725                 goto out_destroy_fp_eq;
6726         }
6727         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6728                         "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6729                         phba->sli4_hba.mbx_cq->queue_id,
6730                         phba->sli4_hba.sp_eq->queue_id);
6731
6732         /* Set up slow-path ELS Complete Queue */
6733         if (!phba->sli4_hba.els_cq) {
6734                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6735                                 "0530 ELS CQ not allocated\n");
6736                 rc = -ENOMEM;
6737                 goto out_destroy_mbx_cq;
6738         }
6739         rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6740                             LPFC_WCQ, LPFC_ELS);
6741         if (rc) {
6742                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6743                                 "0531 Failed setup of slow-path ELS CQ: "
6744                                 "rc = 0x%x\n", rc);
6745                 goto out_destroy_mbx_cq;
6746         }
6747         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6748                         "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6749                         phba->sli4_hba.els_cq->queue_id,
6750                         phba->sli4_hba.sp_eq->queue_id);
6751
6752         /* Set up fast-path FCP Response Complete Queue */
6753         if (!phba->sli4_hba.fcp_cq) {
6754                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755                                 "3148 Fast-path FCP CQ array not "
6756                                 "allocated\n");
6757                 rc = -ENOMEM;
6758                 goto out_destroy_els_cq;
6759         }
6760         fcp_cqidx = 0;
6761         do {
6762                 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6763                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6764                                         "0526 Fast-path FCP CQ (%d) not "
6765                                         "allocated\n", fcp_cqidx);
6766                         rc = -ENOMEM;
6767                         goto out_destroy_fcp_cq;
6768                 }
6769                 if (phba->cfg_fcp_eq_count)
6770                         rc = lpfc_cq_create(phba,
6771                                             phba->sli4_hba.fcp_cq[fcp_cqidx],
6772                                             phba->sli4_hba.fp_eq[fcp_cqidx],
6773                                             LPFC_WCQ, LPFC_FCP);
6774                 else
6775                         rc = lpfc_cq_create(phba,
6776                                             phba->sli4_hba.fcp_cq[fcp_cqidx],
6777                                             phba->sli4_hba.sp_eq,
6778                                             LPFC_WCQ, LPFC_FCP);
6779                 if (rc) {
6780                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6781                                         "0527 Failed setup of fast-path FCP "
6782                                         "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6783                         goto out_destroy_fcp_cq;
6784                 }
6785                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6786                                 "2588 FCP CQ setup: cq[%d]-id=%d, "
6787                                 "parent %seq[%d]-id=%d\n",
6788                                 fcp_cqidx,
6789                                 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6790                                 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6791                                 fcp_cqidx,
6792                                 (phba->cfg_fcp_eq_count) ?
6793                                    phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6794                                    phba->sli4_hba.sp_eq->queue_id);
6795         } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6796
6797         /*
6798          * Set up all the Work Queues (WQs)
6799          */
6800
6801         /* Set up Mailbox Command Queue */
6802         if (!phba->sli4_hba.mbx_wq) {
6803                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6804                                 "0538 Slow-path MQ not allocated\n");
6805                 rc = -ENOMEM;
6806                 goto out_destroy_fcp_cq;
6807         }
6808         rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6809                             phba->sli4_hba.mbx_cq, LPFC_MBOX);
6810         if (rc) {
6811                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6812                                 "0539 Failed setup of slow-path MQ: "
6813                                 "rc = 0x%x\n", rc);
6814                 goto out_destroy_fcp_cq;
6815         }
6816         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6817                         "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6818                         phba->sli4_hba.mbx_wq->queue_id,
6819                         phba->sli4_hba.mbx_cq->queue_id);
6820
6821         /* Set up slow-path ELS Work Queue */
6822         if (!phba->sli4_hba.els_wq) {
6823                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6824                                 "0536 Slow-path ELS WQ not allocated\n");
6825                 rc = -ENOMEM;
6826                 goto out_destroy_mbx_wq;
6827         }
6828         rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6829                             phba->sli4_hba.els_cq, LPFC_ELS);
6830         if (rc) {
6831                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6832                                 "0537 Failed setup of slow-path ELS WQ: "
6833                                 "rc = 0x%x\n", rc);
6834                 goto out_destroy_mbx_wq;
6835         }
6836         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6837                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6838                         phba->sli4_hba.els_wq->queue_id,
6839                         phba->sli4_hba.els_cq->queue_id);
6840
6841         /* Set up fast-path FCP Work Queue */
6842         if (!phba->sli4_hba.fcp_wq) {
6843                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6844                                 "3149 Fast-path FCP WQ array not "
6845                                 "allocated\n");
6846                 rc = -ENOMEM;
6847                 goto out_destroy_els_wq;
6848         }
6849         for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6850                 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6851                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6852                                         "0534 Fast-path FCP WQ (%d) not "
6853                                         "allocated\n", fcp_wqidx);
6854                         rc = -ENOMEM;
6855                         goto out_destroy_fcp_wq;
6856                 }
6857                 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6858                                     phba->sli4_hba.fcp_cq[fcp_cq_index],
6859                                     LPFC_FCP);
6860                 if (rc) {
6861                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6862                                         "0535 Failed setup of fast-path FCP "
6863                                         "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6864                         goto out_destroy_fcp_wq;
6865                 }
6866                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6867                                 "2591 FCP WQ setup: wq[%d]-id=%d, "
6868                                 "parent cq[%d]-id=%d\n",
6869                                 fcp_wqidx,
6870                                 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6871                                 fcp_cq_index,
6872                                 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6873                 /* Round robin FCP Work Queue's Completion Queue assignment */
6874                 if (phba->cfg_fcp_eq_count)
6875                         fcp_cq_index = ((fcp_cq_index + 1) %
6876                                         phba->cfg_fcp_eq_count);
6877         }
6878
6879         /*
6880          * Set up Receive Queue (RQ)
6881          */
6882         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6883                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6884                                 "0540 Receive Queue not allocated\n");
6885                 rc = -ENOMEM;
6886                 goto out_destroy_fcp_wq;
6887         }
6888
6889         lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6890         lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6891
6892         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6893                             phba->sli4_hba.els_cq, LPFC_USOL);
6894         if (rc) {
6895                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6896                                 "0541 Failed setup of Receive Queue: "
6897                                 "rc = 0x%x\n", rc);
6898                 goto out_destroy_fcp_wq;
6899         }
6900
6901         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6902                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6903                         "parent cq-id=%d\n",
6904                         phba->sli4_hba.hdr_rq->queue_id,
6905                         phba->sli4_hba.dat_rq->queue_id,
6906                         phba->sli4_hba.els_cq->queue_id);
6907         return 0;
6908
6909 out_destroy_fcp_wq:
6910         for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6911                 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6912 out_destroy_els_wq:
6913         lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6914 out_destroy_mbx_wq:
6915         lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6916 out_destroy_fcp_cq:
6917         for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6918                 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6919 out_destroy_els_cq:
6920         lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6921 out_destroy_mbx_cq:
6922         lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6923 out_destroy_fp_eq:
6924         for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6925                 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6926 out_destroy_sp_eq:
6927         lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6928 out_error:
6929         return rc;
6930 }
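
/*
 * The WQ-to-CQ pairing above is a plain round robin: with N fast-path
 * EQs (and therefore N fast-path CQs), work queue i posts completions
 * to CQ (i % N).  Compiled-out sketch of just the index arithmetic;
 * "num_wq" and "num_cq" are illustrative names, not driver fields.
 */
#if 0
static void example_round_robin_map(int num_wq, int num_cq)
{
	int wq, cq = 0;

	for (wq = 0; wq < num_wq; wq++) {
		pr_info("wq %d -> cq %d\n", wq, cq);
		/* advance only when there are CQs to rotate through */
		if (num_cq)
			cq = (cq + 1) % num_cq;
	}
}
#endif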
6931
6932 /**
6933  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6934  * @phba: pointer to lpfc hba data structure.
6935  *
6936  * This routine is invoked to unset (destroy on the port) all the SLI4
6937  * queues that were set up for the FCoE HBA operation.  The routine
6938  * returns no value.
6939  **/
6944 void
6945 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6946 {
6947         int fcp_qidx;
6948
6949         /* Unset mailbox command work queue */
6950         lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6951         /* Unset ELS work queue */
6952         lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6953         /* Unset unsolicited receive queue */
6954         lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6955         /* Unset FCP work queue */
6956         for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6957                 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6958         /* Unset mailbox command complete queue */
6959         lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6960         /* Unset ELS complete queue */
6961         lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6962         /* Unset FCP response complete queue */
6963         if (phba->sli4_hba.fcp_cq) {
6964                 fcp_qidx = 0;
6965                 do {
6966                         lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6967                 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
6968         }
6969         /* Unset fast-path event queue */
6970         if (phba->sli4_hba.fp_eq) {
6971                 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6972                      fcp_qidx++)
6973                         lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6974         }
6975         /* Unset slow-path event queue */
6976         lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6977 }
6978
6979 /**
6980  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6981  * @phba: pointer to lpfc hba data structure.
6982  *
6983  * This routine is invoked to allocate and set up a pool of completion queue
6984  * events. The body of the completion queue event is a completion queue entry
6985  * CQE. For now, this pool is used for the interrupt service routine to queue
6986  * the following HBA completion queue events for the worker thread to process:
6987  *   - Mailbox asynchronous events
6988  *   - Receive queue completion unsolicited events
6989  * Later, this can be used for all the slow-path events.
6990  *
6991  * Return codes
6992  *      0 - successful
6993  *      -ENOMEM - No available memory
6994  **/
6995 static int
6996 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6997 {
6998         struct lpfc_cq_event *cq_event;
6999         int i;
7000
7001         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7002                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7003                 if (!cq_event)
7004                         goto out_pool_create_fail;
7005                 list_add_tail(&cq_event->list,
7006                               &phba->sli4_hba.sp_cqe_event_pool);
7007         }
7008         return 0;
7009
7010 out_pool_create_fail:
7011         lpfc_sli4_cq_event_pool_destroy(phba);
7012         return -ENOMEM;
7013 }
7014
7015 /**
7016  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7017  * @phba: pointer to lpfc hba data structure.
7018  *
7019  * This routine is invoked to free the pool of completion queue events at
7020  * driver unload time. Note that, it is the responsibility of the driver
7021  * cleanup routine to free all the outstanding completion-queue events
7022  * allocated from this pool back into the pool before invoking this routine
7023  * to destroy the pool.
7024  **/
7025 static void
7026 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7027 {
7028         struct lpfc_cq_event *cq_event, *next_cq_event;
7029
7030         list_for_each_entry_safe(cq_event, next_cq_event,
7031                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
7032                 list_del(&cq_event->list);
7033                 kfree(cq_event);
7034         }
7035 }
7036
7037 /**
7038  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7039  * @phba: pointer to lpfc hba data structure.
7040  *
7041  * This routine is the lock free version of the API invoked to allocate a
7042  * completion-queue event from the free pool.
7043  *
7044  * Return: Pointer to the newly allocated completion-queue event if successful
7045  *         NULL otherwise.
7046  **/
7047 struct lpfc_cq_event *
7048 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7049 {
7050         struct lpfc_cq_event *cq_event = NULL;
7051
7052         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7053                          struct lpfc_cq_event, list);
7054         return cq_event;
7055 }
7056
7057 /**
7058  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7059  * @phba: pointer to lpfc hba data structure.
7060  *
7061  * This routine is the lock version of the API invoked to allocate a
7062  * completion-queue event from the free pool.
7063  *
7064  * Return: Pointer to the newly allocated completion-queue event if successful
7065  *         NULL otherwise.
7066  **/
7067 struct lpfc_cq_event *
7068 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7069 {
7070         struct lpfc_cq_event *cq_event;
7071         unsigned long iflags;
7072
7073         spin_lock_irqsave(&phba->hbalock, iflags);
7074         cq_event = __lpfc_sli4_cq_event_alloc(phba);
7075         spin_unlock_irqrestore(&phba->hbalock, iflags);
7076         return cq_event;
7077 }
7078
7079 /**
7080  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7081  * @phba: pointer to lpfc hba data structure.
7082  * @cq_event: pointer to the completion queue event to be freed.
7083  *
7084  * This routine is the lock free version of the API invoked to release a
7085  * completion-queue event back into the free pool.
7086  **/
7087 void
7088 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7089                              struct lpfc_cq_event *cq_event)
7090 {
7091         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7092 }
7093
7094 /**
7095  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7096  * @phba: pointer to lpfc hba data structure.
7097  * @cq_event: pointer to the completion queue event to be freed.
7098  *
7099  * This routine is the lock version of the API invoked to release a
7100  * completion-queue event back into the free pool.
7101  **/
7102 void
7103 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7104                            struct lpfc_cq_event *cq_event)
7105 {
7106         unsigned long iflags;

7107         spin_lock_irqsave(&phba->hbalock, iflags);
7108         __lpfc_sli4_cq_event_release(phba, cq_event);
7109         spin_unlock_irqrestore(&phba->hbalock, iflags);
7110 }
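
/*
 * Typical life cycle of a completion-queue event, assuming the free
 * pool was populated by lpfc_sli4_cq_event_pool_create().  The pool
 * exists because the interrupt handler cannot sleep in an allocator;
 * it pulls a pre-allocated event, fills it, and hands it to the worker
 * thread, which returns it to the pool.  Compiled-out sketch:
 */
#if 0
static void example_cq_event_cycle(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* ISR side: take a pre-allocated event from the pool */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;		/* pool exhausted; event is dropped */

	/* ... copy the CQE into cq_event, queue it for the worker ... */

	/* worker side: return the event to the free pool when done */
	lpfc_sli4_cq_event_release(phba, cq_event);
}
#endif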
7111
7112 /**
7113  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7114  * @phba: pointer to lpfc hba data structure.
7115  *
7116  * This routine frees all the pending completion-queue events back into
7117  * the free pool in preparation for device reset.
7118  **/
7119 static void
7120 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7121 {
7122         LIST_HEAD(cqelist);
7123         struct lpfc_cq_event *cqe;
7124         unsigned long iflags;
7125
7126         /* Retrieve all the pending WCQEs from pending WCQE lists */
7127         spin_lock_irqsave(&phba->hbalock, iflags);
7128         /* Pending FCP XRI abort events */
7129         list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7130                          &cqelist);
7131         /* Pending ELS XRI abort events */
7132         list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7133                          &cqelist);
7134         /* Pending async events */
7135         list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7136                          &cqelist);
7137         spin_unlock_irqrestore(&phba->hbalock, iflags);
7138
7139         while (!list_empty(&cqelist)) {
7140                 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7141                 lpfc_sli4_cq_event_release(phba, cqe);
7142         }
7143 }
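
/*
 * The routine above uses the common "splice under lock, process outside
 * the lock" idiom: list_splice_init() moves all pending entries onto a
 * private list while hbalock is held, so the potentially long release
 * loop runs without the spinlock.  Compiled-out sketch of the idiom;
 * the "src" list parameter is an illustrative stand-in for any of the
 * three pending-event lists.
 */
#if 0
static void example_splice_and_drain(struct lpfc_hba *phba,
				     struct list_head *src)
{
	LIST_HEAD(tmplist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	list_splice_init(src, &tmplist);	/* src is now empty */
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&tmplist)) {
		list_remove_head(&tmplist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
#endif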
7144
7145 /**
7146  * lpfc_pci_function_reset - Reset pci function.
7147  * @phba: pointer to lpfc hba data structure.
7148  *
7149  * This routine is invoked to request a PCI function reset. It destroys
7150  * all resources assigned to the PCI function that originates this request.
7151  *
7152  * Return codes
7153  *      0 - successful
7154  *      -ENOMEM - No available memory
7155  *      -EIO - The mailbox failed to complete successfully.
7156  **/
7157 int
7158 lpfc_pci_function_reset(struct lpfc_hba *phba)
7159 {
7160         LPFC_MBOXQ_t *mboxq;
7161         uint32_t rc = 0, if_type;
7162         uint32_t shdr_status, shdr_add_status;
7163         uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7164         union lpfc_sli4_cfg_shdr *shdr;
7165         struct lpfc_register reg_data;
7166
7167         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7168         switch (if_type) {
7169         case LPFC_SLI_INTF_IF_TYPE_0:
7170                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7171                                                        GFP_KERNEL);
7172                 if (!mboxq) {
7173                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7174                                         "0494 Unable to allocate memory for "
7175                                         "issuing SLI_FUNCTION_RESET mailbox "
7176                                         "command\n");
7177                         return -ENOMEM;
7178                 }
7179
7180                 /* Setup PCI function reset mailbox-ioctl command */
7181                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7182                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7183                                  LPFC_SLI4_MBX_EMBED);
7184                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7185                 shdr = (union lpfc_sli4_cfg_shdr *)
7186                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7187                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7188                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7189                                          &shdr->response);
7190                 if (rc != MBX_TIMEOUT)
7191                         mempool_free(mboxq, phba->mbox_mem_pool);
7192                 if (shdr_status || shdr_add_status || rc) {
7193                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7194                                         "0495 SLI_FUNCTION_RESET mailbox "
7195                                         "failed with status x%x add_status x%x,"
7196                                         " mbx status x%x\n",
7197                                         shdr_status, shdr_add_status, rc);
7198                         rc = -ENXIO;
7199                 }
7200                 break;
7201         case LPFC_SLI_INTF_IF_TYPE_2:
7202                 for (num_resets = 0;
7203                      num_resets < MAX_IF_TYPE_2_RESETS;
7204                      num_resets++) {
7205                         reg_data.word0 = 0;
7206                         bf_set(lpfc_sliport_ctrl_end, &reg_data,
7207                                LPFC_SLIPORT_LITTLE_ENDIAN);
7208                         bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7209                                LPFC_SLIPORT_INIT_PORT);
7210                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7211                                CTRLregaddr);
7212
7213                         /*
7214                          * Poll the Port Status Register and wait for RDY for
7215                          * up to 10 seconds.  If the port doesn't respond, treat
7216                          * it as an error.  If the port responds with RN, start
7217                          * the loop again.
7218                          */
7219                         for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7220                                 msleep(10);
7221                                 if (lpfc_readl(phba->sli4_hba.u.if_type2.
7222                                               STATUSregaddr, &reg_data.word0)) {
7223                                         rc = -ENODEV;
7224                                         goto out;
7225                                 }
7226                                 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7227                                         break;
7228                                 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
7229                                         reset_again++;
7230                                         break;
7231                                 }
7232                         }
7233
7234                         /*
7235                          * If the port responds to the init request with
7236                          * reset needed, delay for a bit and restart the loop.
7237                          */
7238                         if (reset_again) {
7239                                 msleep(10);
7240                                 reset_again = 0;
7241                                 continue;
7242                         }
7243
7244                         /* Detect any port errors. */
7245                         if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7246                             (rdy_chk >= 1000)) {
7247                                 phba->work_status[0] = readl(
7248                                         phba->sli4_hba.u.if_type2.ERR1regaddr);
7249                                 phba->work_status[1] = readl(
7250                                         phba->sli4_hba.u.if_type2.ERR2regaddr);
7251                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7252                                         "2890 Port Error Detected "
7253                                         "during Port Reset: "
7254                                         "port status reg 0x%x, "
7255                                         "error 1=0x%x, error 2=0x%x\n",
7256                                         reg_data.word0,
7257                                         phba->work_status[0],
7258                                         phba->work_status[1]);
7259                                 rc = -ENODEV;
7260                         }
7261
7262                         /*
7263                          * Terminate the outer loop provided the Port indicated
7264                          * ready within 10 seconds.
7265                          */
7266                         if (rdy_chk < 1000)
7267                                 break;
7268                 }
7269                 /* delay driver action following IF_TYPE_2 function reset */
7270                 msleep(100);
7271                 break;
7272         case LPFC_SLI_INTF_IF_TYPE_1:
7273         default:
7274                 break;
7275         }
7276
7277 out:
7278         /* Catch the not-ready port failure after a port reset. */
7279         if (num_resets >= MAX_IF_TYPE_2_RESETS)
7280                 rc = -ENODEV;
7281
7282         return rc;
7283 }
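
/*
 * The IF_TYPE_2 branch above polls the port status register for RDY in
 * 10 ms steps, giving up after 1000 iterations (about 10 seconds).  A
 * compiled-out sketch of that poll loop in isolation; the helper name
 * is illustrative only.
 */
#if 0
static int example_wait_port_ready(struct lpfc_hba *phba)
{
	struct lpfc_register reg_data;
	int rdy_chk;

	for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
		msleep(10);
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return -ENODEV;		/* register read failed */
		if (bf_get(lpfc_sliport_status_rdy, &reg_data))
			return 0;		/* port reports ready */
	}
	return -ETIMEDOUT;			/* no RDY within ~10 s */
}
#endif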
7284
7285 /**
7286  * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7287  * @phba: pointer to lpfc hba data structure.
7288  * @cnt: number of nop mailbox commands to send.
7289  *
7290  * This routine is invoked to send @cnt NOP mailbox commands and to wait
7291  * for each command to complete.
7292  *
7293  * Return: the number of NOP mailbox command completed.
7294  **/
7295 static int
7296 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7297 {
7298         LPFC_MBOXQ_t *mboxq;
7299         int length, cmdsent;
7300         uint32_t mbox_tmo;
7301         uint32_t rc = 0;
7302         uint32_t shdr_status, shdr_add_status;
7303         union lpfc_sli4_cfg_shdr *shdr;
7304
7305         if (cnt == 0) {
7306                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7307                                 "2518 Requested to send 0 NOP mailbox cmd\n");
7308                 return cnt;
7309         }
7310
7311         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7312         if (!mboxq) {
7313                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7314                                 "2519 Unable to allocate memory for issuing "
7315                                 "NOP mailbox command\n");
7316                 return 0;
7317         }
7318
7319         /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7320         length = (sizeof(struct lpfc_mbx_nop) -
7321                   sizeof(struct lpfc_sli4_cfg_mhdr));
7322         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7323                          LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7324
7325         for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7326                 if (!phba->sli4_hba.intr_enable)
7327                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7328                 else {
7329                         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7330                         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7331                 }
7332                 if (rc == MBX_TIMEOUT)
7333                         break;
7334                 /* Check return status */
7335                 shdr = (union lpfc_sli4_cfg_shdr *)
7336                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7337                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7338                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7339                                          &shdr->response);
7340                 if (shdr_status || shdr_add_status || rc) {
7341                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7342                                         "2520 NOP mailbox command failed "
7343                                         "status x%x add_status x%x mbx "
7344                                         "status x%x\n", shdr_status,
7345                                         shdr_add_status, rc);
7346                         break;
7347                 }
7348         }
7349
7350         if (rc != MBX_TIMEOUT)
7351                 mempool_free(mboxq, phba->mbox_mem_pool);
7352
7353         return cmdsent;
7354 }
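
/*
 * Callers are expected to compare the return value with the requested
 * count to detect partial completion.  Compiled-out usage sketch; the
 * count of 10 is arbitrary.
 */
#if 0
static void example_send_nops(struct lpfc_hba *phba)
{
	int wanted = 10;

	if (lpfc_sli4_send_nop_mbox_cmds(phba, wanted) != wanted)
		pr_warn("lpfc: not all NOP mailbox commands completed\n");
}
#endif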
7355
7356 /**
7357  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7358  * @phba: pointer to lpfc hba data structure.
7359  *
7360  * This routine is invoked to set up the PCI device memory space for device
7361  * with SLI-4 interface spec.
7362  *
7363  * Return codes
7364  *      0 - successful
7365  *      other values - error
7366  **/
7367 static int
7368 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7369 {
7370         struct pci_dev *pdev;
7371         unsigned long bar0map_len, bar1map_len, bar2map_len;
7372         int error = -ENODEV;
7373         uint32_t if_type;
7374
7375         /* Obtain PCI device reference */
7376         if (!phba->pcidev)
7377                 return error;
7378         pdev = phba->pcidev;
7380
7381         /* Set the device DMA mask size */
7382         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
7383             pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
7384                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
7385                     pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7386                         return error;
7387                 }
7388         }
7389
7390         /*
7391          * The BARs and register set definitions and offset locations are
7392          * dependent on the if_type.
7393          */
7394         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7395                                   &phba->sli4_hba.sli_intf.word0)) {
7396                 return error;
7397         }
7398
7399         /* There is no SLI3 fallback for SLI4 devices. */
7400         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7401             LPFC_SLI_INTF_VALID) {
7402                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7403                                 "2894 SLI_INTF reg contents invalid "
7404                                 "sli_intf reg 0x%x\n",
7405                                 phba->sli4_hba.sli_intf.word0);
7406                 return error;
7407         }
7408
7409         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7410         /*
7411          * Get the bus address of SLI4 device Bar regions and the
7412          * number of bytes required by each mapping. The mapping of the
7413          * particular PCI BARs regions is dependent on the type of
7414          * SLI4 device.
7415          */
7416         if (pci_resource_start(pdev, 0)) {
7417                 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7418                 bar0map_len = pci_resource_len(pdev, 0);
7419
7420                 /*
7421                  * Map SLI4 PCI Config Space Register base to a kernel virtual
7422                  * addr
7423                  */
7424                 phba->sli4_hba.conf_regs_memmap_p =
7425                         ioremap(phba->pci_bar0_map, bar0map_len);
7426                 if (!phba->sli4_hba.conf_regs_memmap_p) {
7427                         dev_printk(KERN_ERR, &pdev->dev,
7428                                    "ioremap failed for SLI4 PCI config "
7429                                    "registers.\n");
7430                         goto out;
7431                 }
7432                 /* Set up BAR0 PCI config space register memory map */
7433                 lpfc_sli4_bar0_register_memmap(phba, if_type);
7434         } else {
7435                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7436                 bar0map_len = pci_resource_len(pdev, 1);
7437                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7438                         dev_printk(KERN_ERR, &pdev->dev,
7439                            "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7440                         goto out;
7441                 }
7442                 phba->sli4_hba.conf_regs_memmap_p =
7443                                 ioremap(phba->pci_bar0_map, bar0map_len);
7444                 if (!phba->sli4_hba.conf_regs_memmap_p) {
7445                         dev_printk(KERN_ERR, &pdev->dev,
7446                                 "ioremap failed for SLI4 PCI config "
7447                                 "registers.\n");
7448                         goto out;
7449                 }
7450                 lpfc_sli4_bar0_register_memmap(phba, if_type);
7451         }
7452
7453         if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7454             (pci_resource_start(pdev, 2))) {
7455                 /*
7456                  * Map SLI4 if type 0 HBA Control Register base to a kernel
7457                  * virtual address and setup the registers.
7458                  */
7459                 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7460                 bar1map_len = pci_resource_len(pdev, 2);
7461                 phba->sli4_hba.ctrl_regs_memmap_p =
7462                                 ioremap(phba->pci_bar1_map, bar1map_len);
7463                 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7464                         dev_printk(KERN_ERR, &pdev->dev,
7465                            "ioremap failed for SLI4 HBA control registers.\n");
7466                         goto out_iounmap_conf;
7467                 }
7468                 lpfc_sli4_bar1_register_memmap(phba);
7469         }
7470
7471         if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7472             (pci_resource_start(pdev, 4))) {
7473                 /*
7474                  * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7475                  * virtual address and setup the registers.
7476                  */
7477                 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7478                 bar2map_len = pci_resource_len(pdev, 4);
7479                 phba->sli4_hba.drbl_regs_memmap_p =
7480                                 ioremap(phba->pci_bar2_map, bar2map_len);
7481                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7482                         dev_printk(KERN_ERR, &pdev->dev,
7483                            "ioremap failed for SLI4 HBA doorbell registers.\n");
7484                         goto out_iounmap_ctrl;
7485                 }
7486                 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7487                 if (error)
7488                         goto out_iounmap_all;
7489         }
7490
7491         return 0;
7492
7493 out_iounmap_all:
7494         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7495 out_iounmap_ctrl:
7496         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7497 out_iounmap_conf:
7498         iounmap(phba->sli4_hba.conf_regs_memmap_p);
7499 out:
7500         return error;
7501 }
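
/*
 * The DMA mask setup in the routine above is the standard "try 64-bit,
 * fall back to 32-bit" pattern: both the streaming and the coherent
 * mask must be set, and failure of either at 64 bits drops the device
 * to 32-bit addressing.  Compiled-out sketch of the pattern:
 */
#if 0
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit DMA */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* 32-bit fallback */
	return -EIO;		/* no usable DMA mask */
}
#endif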
7502
7503 /**
7504  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7505  * @phba: pointer to lpfc hba data structure.
7506  *
7507  * This routine is invoked to unset the PCI device memory space for device
7508  * with SLI-4 interface spec.
7509  **/
7510 static void
7511 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7512 {
7513         uint32_t if_type;
7514
7515         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7516         switch (if_type) {
7517         case LPFC_SLI_INTF_IF_TYPE_0:
7518                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7519                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7520                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7521                 break;
7522         case LPFC_SLI_INTF_IF_TYPE_2:
7523                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7524                 break;
7525         case LPFC_SLI_INTF_IF_TYPE_1:
7526         default:
7527                 dev_printk(KERN_ERR, &phba->pcidev->dev,
7528                            "FATAL - unsupported SLI4 interface type - %d\n",
7529                            if_type);
7530                 break;
7531         }
7532 }
7533
7534 /**
7535  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7536  * @phba: pointer to lpfc hba data structure.
7537  *
7538  * This routine is invoked to enable the MSI-X interrupt vectors to device
7539  * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7540  * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7541  * invoked, enables either all or nothing, depending on the current
7542  * availability of PCI vector resources. The device driver is responsible
7543  * for calling the individual request_irq() to register each MSI-X vector
7544  * with an interrupt handler, which is done in this function. Note that
7545  * later, when the device is unloading, the driver should always call
7546  * free_irq() on all MSI-X vectors it has done request_irq() on before
7547  * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
7548  * leaves the device with MSI-X enabled, leaking its vectors.
7549  *
7550  * Return codes
7551  *   0 - successful
7552  *   other values - error
7553  **/
7554 static int
7555 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7556 {
7557         int rc, i;
7558         LPFC_MBOXQ_t *pmb;
7559
7560         /* Set up MSI-X multi-message vectors */
7561         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7562                 phba->msix_entries[i].entry = i;
7563
7564         /* Configure MSI-X capability structure */
7565         rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7566                                 ARRAY_SIZE(phba->msix_entries));
7567         if (rc) {
7568                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7569                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
7570                 goto msi_fail_out;
7571         }
7572         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7573                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7574                                 "0477 MSI-X entry[%d]: vector=x%x "
7575                                 "message=%d\n", i,
7576                                 phba->msix_entries[i].vector,
7577                                 phba->msix_entries[i].entry);
7578         /*
7579          * Assign MSI-X vectors to interrupt handlers
7580          */
7581
7582         /* vector-0 is associated to slow-path handler */
7583         rc = request_irq(phba->msix_entries[0].vector,
7584                          &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7585                          LPFC_SP_DRIVER_HANDLER_NAME, phba);
7586         if (rc) {
7587                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7588                                 "0421 MSI-X slow-path request_irq failed "
7589                                 "(%d)\n", rc);
7590                 goto msi_fail_out;
7591         }
7592
7593         /* vector-1 is associated to fast-path handler */
7594         rc = request_irq(phba->msix_entries[1].vector,
7595                          &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7596                          LPFC_FP_DRIVER_HANDLER_NAME, phba);
7597
7598         if (rc) {
7599                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7600                                 "0429 MSI-X fast-path request_irq failed "
7601                                 "(%d)\n", rc);
7602                 goto irq_fail_out;
7603         }
7604
7605         /*
7606          * Configure HBA MSI-X attention conditions to messages
7607          */
7608         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7609
7610         if (!pmb) {
7611                 rc = -ENOMEM;
7612                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7613                                 "0474 Unable to allocate memory for issuing "
7614                                 "MBOX_CONFIG_MSI command\n");
7615                 goto mem_fail_out;
7616         }
7617         rc = lpfc_config_msi(phba, pmb);
7618         if (rc)
7619                 goto mbx_fail_out;
7620         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7621         if (rc != MBX_SUCCESS) {
7622                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7623                                 "0351 Config MSI mailbox command failed, "
7624                                 "mbxCmd x%x, mbxStatus x%x\n",
7625                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7626                 goto mbx_fail_out;
7627         }
7628
7629         /* Free memory allocated for mailbox command */
7630         mempool_free(pmb, phba->mbox_mem_pool);
7631         return rc;
7632
7633 mbx_fail_out:
7634         /* Free memory allocated for mailbox command */
7635         mempool_free(pmb, phba->mbox_mem_pool);
7636
7637 mem_fail_out:
7638         /* free the irq already requested */
7639         free_irq(phba->msix_entries[1].vector, phba);
7640
7641 irq_fail_out:
7642         /* free the irq already requested */
7643         free_irq(phba->msix_entries[0].vector, phba);
7644
7645 msi_fail_out:
7646         /* Unconfigure MSI-X capability structure */
7647         pci_disable_msix(phba->pcidev);
7648         return rc;
7649 }
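
/*
 * Each msix_entry pairs a driver-chosen message number (.entry) with a
 * kernel-assigned IRQ number (.vector) once pci_enable_msix() succeeds;
 * .vector is then what request_irq() takes.  Compiled-out sketch of the
 * setup half of that contract, condensed from the function above.
 */
#if 0
static int example_msix_request(struct lpfc_hba *phba)
{
	int rc, i;

	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;	/* message index */

	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     LPFC_MSIX_VECTORS);
	if (rc)
		return rc;	/* no, or not enough, vectors available */

	/* .vector now holds the IRQ number to pass to request_irq() */
	return 0;
}
#endif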
7650
7651 /**
7652  * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7653  * @phba: pointer to lpfc hba data structure.
7654  *
7655  * This routine is invoked to release the MSI-X vectors and then disable the
7656  * MSI-X interrupt mode to device with SLI-3 interface spec.
7657  **/
7658 static void
7659 lpfc_sli_disable_msix(struct lpfc_hba *phba)
7660 {
7661         int i;
7662
7663         /* Free up MSI-X multi-message vectors */
7664         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7665                 free_irq(phba->msix_entries[i].vector, phba);
7666         /* Disable MSI-X */
7667         pci_disable_msix(phba->pcidev);
7668
7669         return;
7670 }
7671
7672 /**
7673  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7674  * @phba: pointer to lpfc hba data structure.
7675  *
7676  * This routine is invoked to enable the MSI interrupt mode to device with
7677  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7678  * enable the MSI vector. The device driver is responsible for calling the
7679  * request_irq() to register MSI vector with a interrupt the handler, which
7680  * request_irq() to register the MSI vector with an interrupt handler, which
7681  *
7682  * Return codes
7683  *      0 - successful
7684  *      other values - error
7685  **/
7686 static int
7687 lpfc_sli_enable_msi(struct lpfc_hba *phba)
7688 {
7689         int rc;
7690
7691         rc = pci_enable_msi(phba->pcidev);
7692         if (!rc)
7693                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7694                                 "0462 PCI enable MSI mode success.\n");
7695         else {
7696                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7697                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
7698                 return rc;
7699         }
7700
7701         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7702                          IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7703         if (rc) {
7704                 pci_disable_msi(phba->pcidev);
7705                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7706                                 "0478 MSI request_irq failed (%d)\n", rc);
7707         }
7708         return rc;
7709 }
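/*
 * Note (generic PCI MSI behavior, not lpfc-specific): pci_enable_msi()
 * rewrites pdev->irq to the allocated MSI vector, which is why the MSI
 * paths in this file can keep passing phba->pcidev->irq straight to
 * request_irq() and free_irq(). A minimal sketch of the pattern used in
 * lpfc_sli_enable_msi() above, with handler/name/dev_id as placeholders:
 *
 *	rc = pci_enable_msi(pdev);	(pdev->irq is now the MSI vector)
 *	if (!rc)
 *		rc = request_irq(pdev->irq, handler, IRQF_SHARED,
 *				 name, dev_id);
 */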
7710
7711 /**
7712  * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7713  * @phba: pointer to lpfc hba data structure.
7714  *
7715  * This routine is invoked to disable the MSI interrupt mode on a device with
7716  * the SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
7717  * has done request_irq() on before calling pci_disable_msi(). Failure to do
7718  * so results in a BUG_ON() and leaves the device with MSI enabled, leaking
7719  * its vector.
7720  **/
7721 static void
7722 lpfc_sli_disable_msi(struct lpfc_hba *phba)
7723 {
7724         free_irq(phba->pcidev->irq, phba);
7725         pci_disable_msi(phba->pcidev);
7726         return;
7727 }
7728
7729 /**
7730  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7731  * @phba: pointer to lpfc hba data structure.
7732  *
7733  * This routine is invoked to enable the device interrupt and associate the
7734  * driver's interrupt handler(s) with the interrupt vector(s) of a device
7735  * with the SLI-3 interface spec. Depending on the interrupt mode configured
7736  * for the driver, the driver will try to fall back from the configured
7737  * interrupt mode to an interrupt mode supported by the platform, kernel,
7738  * and device, in the order:
7739  * MSI-X -> MSI -> IRQ.
7740  *
7741  * Return codes
7742  *   LPFC_INTR_ERROR - no interrupt mode could be enabled
7743  *   other values - the interrupt mode that was enabled (2, 1, or 0)
7744  **/
7745 static uint32_t
7746 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7747 {
7748         uint32_t intr_mode = LPFC_INTR_ERROR;
7749         int retval;
7750
7751         if (cfg_mode == 2) {
7752                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7753                 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7754                 if (!retval) {
7755                         /* Now, try to enable MSI-X interrupt mode */
7756                         retval = lpfc_sli_enable_msix(phba);
7757                         if (!retval) {
7758                                 /* Indicate initialization to MSI-X mode */
7759                                 phba->intr_type = MSIX;
7760                                 intr_mode = 2;
7761                         }
7762                 }
7763         }
7764
7765         /* Fallback to MSI if MSI-X initialization failed */
7766         if (cfg_mode >= 1 && phba->intr_type == NONE) {
7767                 retval = lpfc_sli_enable_msi(phba);
7768                 if (!retval) {
7769                         /* Indicate initialization to MSI mode */
7770                         phba->intr_type = MSI;
7771                         intr_mode = 1;
7772                 }
7773         }
7774
7775         /* Fallback to INTx if both MSI-X/MSI initialization failed */
7776         if (phba->intr_type == NONE) {
7777                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7778                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7779                 if (!retval) {
7780                         /* Indicate initialization to INTx mode */
7781                         phba->intr_type = INTx;
7782                         intr_mode = 0;
7783                 }
7784         }
7785         return intr_mode;
7786 }
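/*
 * Caller sketch (mirrors the probe and resume paths later in this file):
 * the value returned by lpfc_sli_enable_intr() encodes the mode that
 * actually came up, not an errno:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;		(no interrupt mode could be enabled)
 *	phba->intr_mode = intr_mode;	(2 = MSI-X, 1 = MSI, 0 = INTx)
 */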
7787
7788 /**
7789  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7790  * @phba: pointer to lpfc hba data structure.
7791  *
7792  * This routine is invoked to disable device interrupt and disassociate the
7793  * driver's interrupt handler(s) from interrupt vector(s) to device with
7794  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7795  * release the interrupt vector(s) for the message signaled interrupt.
7796  **/
7797 static void
7798 lpfc_sli_disable_intr(struct lpfc_hba *phba)
7799 {
7800         /* Disable the currently initialized interrupt mode */
7801         if (phba->intr_type == MSIX)
7802                 lpfc_sli_disable_msix(phba);
7803         else if (phba->intr_type == MSI)
7804                 lpfc_sli_disable_msi(phba);
7805         else if (phba->intr_type == INTx)
7806                 free_irq(phba->pcidev->irq, phba);
7807
7808         /* Reset interrupt management states */
7809         phba->intr_type = NONE;
7810         phba->sli.slistat.sli_intr = 0;
7811
7812         return;
7813 }
7814
7815 /**
7816  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7817  * @phba: pointer to lpfc hba data structure.
7818  *
7819  * This routine is invoked to enable the MSI-X interrupt vectors to device
7820  * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7821  * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7822  * enables either all or nothing, depending on the current availability of
7823  * PCI vector resources. The device driver is responsible for calling
7824  * request_irq() individually to register each MSI-X vector with an
7825  * interrupt handler, which is done in this function. Note that later, when
7826  * the device is unloading, the driver should always call free_irq() on all
7827  * MSI-X vectors it has done request_irq() on before calling
7828  * pci_disable_msix(). Failure to do so results in a BUG_ON() and leaves
7829  * the device with MSI-X enabled, leaking its vectors.
7830  *
7831  * Return codes
7832  * 0 - successful
7833  * other values - error
7834  **/
7835 static int
7836 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7837 {
7838         int vectors, rc, index;
7839
7840         /* Set up MSI-X multi-message vectors */
7841         for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7842                 phba->sli4_hba.msix_entries[index].entry = index;
7843
7844         /* Configure MSI-X capability structure */
7845         vectors = phba->sli4_hba.cfg_eqn;
7846 enable_msix_vectors:
7847         rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7848                              vectors);
7849         if (rc > 1) {
7850                 vectors = rc;
7851                 goto enable_msix_vectors;
7852         } else if (rc) {
7853                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7854                                 "0484 PCI enable MSI-X failed (%d)\n", rc);
7855                 goto msi_fail_out;
7856         }
7857
7858         /* Log MSI-X vector assignment */
7859         for (index = 0; index < vectors; index++)
7860                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7861                                 "0489 MSI-X entry[%d]: vector=x%x "
7862                                 "message=%d\n", index,
7863                                 phba->sli4_hba.msix_entries[index].vector,
7864                                 phba->sli4_hba.msix_entries[index].entry);
7865         /*
7866          * Assign MSI-X vectors to interrupt handlers
7867          */
7868         if (vectors > 1)
7869                 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7870                                  &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7871                                  LPFC_SP_DRIVER_HANDLER_NAME, phba);
7872         else
7873                 /* All Interrupts need to be handled by one EQ */
7874                 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7875                                  &lpfc_sli4_intr_handler, IRQF_SHARED,
7876                                  LPFC_DRIVER_NAME, phba);
7877         if (rc) {
7878                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7879                                 "0485 MSI-X slow-path request_irq failed "
7880                                 "(%d)\n", rc);
7881                 goto msi_fail_out;
7882         }
7883
7884         /* The rest of the vector(s) are associated to fast-path handler(s) */
7885         for (index = 1; index < vectors; index++) {
7886                 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7887                 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7888                 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7889                                  &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7890                                  LPFC_FP_DRIVER_HANDLER_NAME,
7891                                  &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7892                 if (rc) {
7893                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7894                                         "0486 MSI-X fast-path (%d) "
7895                                         "request_irq failed (%d)\n", index, rc);
7896                         goto cfg_fail_out;
7897                 }
7898         }
7899         phba->sli4_hba.msix_vec_nr = vectors;
7900
7901         return rc;
7902
7903 cfg_fail_out:
7904         /* free the irq already requested */
7905         for (--index; index >= 1; index--)
7906                 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7907                          &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7908
7909         /* free the irq already requested */
7910         free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7911
7912 msi_fail_out:
7913         /* Unconfigure MSI-X capability structure */
7914         pci_disable_msix(phba->pcidev);
7915         return rc;
7916 }
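/*
 * Note on the retry loop in lpfc_sli4_enable_msix() above: with this
 * kernel's pci_enable_msix(), a positive return value is the number of
 * vectors the system could have allocated rather than the count that was
 * requested, so the driver shrinks its request and retries as long as
 * more than one vector remains available:
 *
 *	rc = pci_enable_msix(pdev, entries, nvec);
 *	if (rc > 1) {
 *		nvec = rc;	(fewer vectors available, retry)
 *		goto retry;
 *	}
 */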
7917
7918 /**
7919  * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7920  * @phba: pointer to lpfc hba data structure.
7921  *
7922  * This routine is invoked to release the MSI-X vectors and then disable the
7923  * MSI-X interrupt mode to device with SLI-4 interface spec.
7924  **/
7925 static void
7926 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7927 {
7928         int index;
7929
7930         /* Free up MSI-X multi-message vectors */
7931         free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7932
7933         for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7934                 free_irq(phba->sli4_hba.msix_entries[index].vector,
7935                          &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7936
7937         /* Disable MSI-X */
7938         pci_disable_msix(phba->pcidev);
7939
7940         return;
7941 }
7942
7943 /**
7944  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7945  * @phba: pointer to lpfc hba data structure.
7946  *
7947  * This routine is invoked to enable the MSI interrupt mode on a device with
7948  * the SLI-4 interface spec. The kernel function pci_enable_msi() is called
7949  * to enable the MSI vector. The device driver is responsible for calling
7950  * request_irq() to register the MSI vector with an interrupt handler,
7951  * which is done in this function.
7952  *
7953  * Return codes
7954  *      0 - successful
7955  *      other values - error
7956  **/
7957 static int
7958 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7959 {
7960         int rc, index;
7961
7962         rc = pci_enable_msi(phba->pcidev);
7963         if (!rc)
7964                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7965                                 "0487 PCI enable MSI mode success.\n");
7966         else {
7967                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7968                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
7969                 return rc;
7970         }
7971
7972         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7973                          IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7974         if (rc) {
7975                 pci_disable_msi(phba->pcidev);
7976                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7977                                 "0490 MSI request_irq failed (%d)\n", rc);
7978                 return rc;
7979         }
7980
7981         for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7982                 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7983                 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7984         }
7985
7986         return 0;
7987 }
7988
7989 /**
7990  * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7991  * @phba: pointer to lpfc hba data structure.
7992  *
7993  * This routine is invoked to disable the MSI interrupt mode on a device with
7994  * the SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
7995  * has done request_irq() on before calling pci_disable_msi(). Failure to do
7996  * so results in a BUG_ON() and leaves the device with MSI enabled, leaking
7997  * its vector.
7998  **/
7999 static void
8000 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8001 {
8002         free_irq(phba->pcidev->irq, phba);
8003         pci_disable_msi(phba->pcidev);
8004         return;
8005 }
8006
8007 /**
8008  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
8009  * @phba: pointer to lpfc hba data structure.
8010  *
8011  * This routine is invoked to enable the device interrupt and associate the
8012  * driver's interrupt handler(s) with the interrupt vector(s) of a device
8013  * with the SLI-4 interface spec. Depending on the interrupt mode configured
8014  * for the driver, the driver will try to fall back from the configured
8015  * interrupt mode to an interrupt mode supported by the platform, kernel,
8016  * and device, in the order:
8017  * MSI-X -> MSI -> IRQ.
8018  *
8019  * Return codes
8020  *      LPFC_INTR_ERROR - no interrupt mode could be enabled
8021  *      other values - the interrupt mode that was enabled (2, 1, or 0)
8022  **/
8023 static uint32_t
8024 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8025 {
8026         uint32_t intr_mode = LPFC_INTR_ERROR;
8027         int retval, index;
8028
8029         if (cfg_mode == 2) {
8030                 /* Preparation before conf_msi mbox cmd */
8031                 retval = 0;
8032                 if (!retval) {
8033                         /* Now, try to enable MSI-X interrupt mode */
8034                         retval = lpfc_sli4_enable_msix(phba);
8035                         if (!retval) {
8036                                 /* Indicate initialization to MSI-X mode */
8037                                 phba->intr_type = MSIX;
8038                                 intr_mode = 2;
8039                         }
8040                 }
8041         }
8042
8043         /* Fallback to MSI if MSI-X initialization failed */
8044         if (cfg_mode >= 1 && phba->intr_type == NONE) {
8045                 retval = lpfc_sli4_enable_msi(phba);
8046                 if (!retval) {
8047                         /* Indicate initialization to MSI mode */
8048                         phba->intr_type = MSI;
8049                         intr_mode = 1;
8050                 }
8051         }
8052
8053         /* Fallback to INTx if both MSI-X/MSI initialization failed */
8054         if (phba->intr_type == NONE) {
8055                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8056                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8057                 if (!retval) {
8058                         /* Indicate initialization to INTx mode */
8059                         phba->intr_type = INTx;
8060                         intr_mode = 0;
8061                         for (index = 0; index < phba->cfg_fcp_eq_count;
8062                              index++) {
8063                                 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8064                                 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8065                         }
8066                 }
8067         }
8068         return intr_mode;
8069 }
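/*
 * Note: the MSI and INTx fallback paths above still initialize every
 * fcp_eq_hdl[] entry even though only a single vector exists, because
 * the one shared handler must be able to service all the fast-path EQs
 * that would otherwise each own an MSI-X vector.
 */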
8070
8071 /**
8072  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8073  * @phba: pointer to lpfc hba data structure.
8074  *
8075  * This routine is invoked to disable device interrupt and disassociate
8076  * the driver's interrupt handler(s) from interrupt vector(s) to device
8077  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8078  * will release the interrupt vector(s) for the message signaled interrupt.
8079  **/
8080 static void
8081 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8082 {
8083         /* Disable the currently initialized interrupt mode */
8084         if (phba->intr_type == MSIX)
8085                 lpfc_sli4_disable_msix(phba);
8086         else if (phba->intr_type == MSI)
8087                 lpfc_sli4_disable_msi(phba);
8088         else if (phba->intr_type == INTx)
8089                 free_irq(phba->pcidev->irq, phba);
8090
8091         /* Reset interrupt management states */
8092         phba->intr_type = NONE;
8093         phba->sli.slistat.sli_intr = 0;
8094
8095         return;
8096 }
8097
8098 /**
8099  * lpfc_unset_hba - Unset SLI3 hba device initialization
8100  * @phba: pointer to lpfc hba data structure.
8101  *
8102  * This routine is invoked to unset the HBA device initialization steps
8103  * performed on a device with the SLI-3 interface spec.
8104  **/
8105 static void
8106 lpfc_unset_hba(struct lpfc_hba *phba)
8107 {
8108         struct lpfc_vport *vport = phba->pport;
8109         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8110
8111         spin_lock_irq(shost->host_lock);
8112         vport->load_flag |= FC_UNLOADING;
8113         spin_unlock_irq(shost->host_lock);
8114
8115         lpfc_stop_hba_timers(phba);
8116
8117         phba->pport->work_port_events = 0;
8118
8119         lpfc_sli_hba_down(phba);
8120
8121         lpfc_sli_brdrestart(phba);
8122
8123         lpfc_sli_disable_intr(phba);
8124
8125         return;
8126 }
8127
8128 /**
8129  * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8130  * @phba: pointer to lpfc hba data structure.
8131  *
8132  * This routine is invoked to unset the HBA device initialization steps
8133  * performed on a device with the SLI-4 interface spec.
8134  **/
8135 static void
8136 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8137 {
8138         struct lpfc_vport *vport = phba->pport;
8139         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8140
8141         spin_lock_irq(shost->host_lock);
8142         vport->load_flag |= FC_UNLOADING;
8143         spin_unlock_irq(shost->host_lock);
8144
8145         phba->pport->work_port_events = 0;
8146
8147         /* Stop the SLI4 device port */
8148         lpfc_stop_port(phba);
8149
8150         lpfc_sli4_disable_intr(phba);
8151
8152         /* Reset SLI4 HBA FCoE function */
8153         lpfc_pci_function_reset(phba);
8154         lpfc_sli4_queue_destroy(phba);
8155
8156         return;
8157 }
8158
8159 /**
8160  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8161  * @phba: Pointer to HBA context object.
8162  *
8163  * This function is called in the SLI4 code path to wait for completion
8164  * of the device's XRI exchange busy events. It checks the XRI exchange
8165  * busy on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
8166  * after that, it checks the XRI exchange busy on outstanding FCP and ELS
8167  * I/Os every 30 seconds, logs an error message, and waits forever. Only
8168  * when all XRI exchange busy events complete shall the driver unload
8169  * proceed with invoking the function reset ioctl mailbox command to the
8170  * CNA and the rest of the driver unload resource release.
8171  **/
8172 static void
8173 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8174 {
8175         int wait_time = 0;
8176         int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8177         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8178
8179         while (!fcp_xri_cmpl || !els_xri_cmpl) {
8180                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8181                         if (!fcp_xri_cmpl)
8182                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8183                                                 "2877 FCP XRI exchange busy "
8184                                                 "wait time: %d seconds.\n",
8185                                                 wait_time/1000);
8186                         if (!els_xri_cmpl)
8187                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8188                                                 "2878 ELS XRI exchange busy "
8189                                                 "wait time: %d seconds.\n",
8190                                                 wait_time/1000);
8191                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8192                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8193                 } else {
8194                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8195                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8196                 }
8197                 fcp_xri_cmpl =
8198                         list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8199                 els_xri_cmpl =
8200                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8201         }
8202 }
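/*
 * Timing sketch, assuming the wait constants match the kernel-doc above
 * (LPFC_XRI_EXCH_BUSY_WAIT_T1 = 10ms, LPFC_XRI_EXCH_BUSY_WAIT_TMO = 10s,
 * LPFC_XRI_EXCH_BUSY_WAIT_T2 = 30s): the loop polls both abort lists
 * every 10ms for the first 10 seconds of wait_time, then logs the busy
 * state and re-polls every 30 seconds indefinitely until both
 * lpfc_abts_scsi_buf_list and lpfc_abts_els_sgl_list drain empty.
 */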
8203
8204 /**
8205  * lpfc_sli4_hba_unset - Unset the fcoe hba
8206  * @phba: Pointer to HBA context object.
8207  *
8208  * This function is called in the SLI4 code path to reset the HBA's FCoE
8209  * function. The caller is not required to hold any lock. This routine
8210  * issues the PCI function reset mailbox command to reset the FCoE function.
8211  * At the end of the function, it calls lpfc_hba_down_post function to
8212  * free any pending commands.
8213  **/
8214 static void
8215 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8216 {
8217         int wait_cnt = 0;
8218         LPFC_MBOXQ_t *mboxq;
8219         struct pci_dev *pdev = phba->pcidev;
8220
8221         lpfc_stop_hba_timers(phba);
8222         phba->sli4_hba.intr_enable = 0;
8223
8224         /*
8225          * Gracefully wait out the potential current outstanding asynchronous
8226          * mailbox command.
8227          */
8228
8229         /* First, block any pending async mailbox command from being posted */
8230         spin_lock_irq(&phba->hbalock);
8231         phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8232         spin_unlock_irq(&phba->hbalock);
8233         /* Now, try to wait it out if we can */
8234         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8235                 msleep(10);
8236                 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8237                         break;
8238         }
8239         /* Forcefully release the outstanding mailbox command if timed out */
8240         if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8241                 spin_lock_irq(&phba->hbalock);
8242                 mboxq = phba->sli.mbox_active;
8243                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8244                 __lpfc_mbox_cmpl_put(phba, mboxq);
8245                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8246                 phba->sli.mbox_active = NULL;
8247                 spin_unlock_irq(&phba->hbalock);
8248         }
8249
8250         /* Abort all iocbs associated with the hba */
8251         lpfc_sli_hba_iocb_abort(phba);
8252
8253         /* Wait for completion of device XRI exchange busy */
8254         lpfc_sli4_xri_exchange_busy_wait(phba);
8255
8256         /* Disable PCI subsystem interrupt */
8257         lpfc_sli4_disable_intr(phba);
8258
8259         /* Disable SR-IOV if enabled */
8260         if (phba->cfg_sriov_nr_virtfn)
8261                 pci_disable_sriov(pdev);
8262
8263         /* Stop kthread signal shall trigger work_done one more time */
8264         kthread_stop(phba->worker_thread);
8265
8266         /* Reset SLI4 HBA FCoE function */
8267         lpfc_pci_function_reset(phba);
8268         lpfc_sli4_queue_destroy(phba);
8269
8270         /* Stop the SLI4 device port */
8271         phba->pport->work_port_events = 0;
8272 }
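/*
 * Quiesce sketch for the mailbox drain above (wait budget assumed from
 * the LPFC_ACTIVE_MBOX_WAIT_CNT definition): async posting is blocked
 * first, an active command then gets up to LPFC_ACTIVE_MBOX_WAIT_CNT
 * iterations of 10ms sleeps to finish, and only after that is it
 * forcefully completed with MBX_NOT_FINISHED so the unload can make
 * progress.
 */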
8273
8274 /**
8275  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8276  * @phba: Pointer to HBA context object.
8277  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8278  *
8279  * This function is called in the SLI4 code path to read the port's
8280  * sli4 capabilities.
8281  *
8282  * This function may be called from any context that can block-wait
8283  * for the completion.  The expectation is that this routine is called
8284  * typically from probe_one or from the online routine.
8285  **/
8286 int
8287 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8288 {
8289         int rc;
8290         struct lpfc_mqe *mqe;
8291         struct lpfc_pc_sli4_params *sli4_params;
8292         uint32_t mbox_tmo;
8293
8294         rc = 0;
8295         mqe = &mboxq->u.mqe;
8296
8297         /* Read the port's SLI4 Parameters port capabilities */
8298         lpfc_pc_sli4_params(mboxq);
8299         if (!phba->sli4_hba.intr_enable)
8300                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8301         else {
8302                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8303                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8304         }
8305
8306         if (unlikely(rc))
8307                 return 1;
8308
8309         sli4_params = &phba->sli4_hba.pc_sli4_params;
8310         sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8311         sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8312         sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8313         sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8314                                              &mqe->un.sli4_params);
8315         sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8316                                              &mqe->un.sli4_params);
8317         sli4_params->proto_types = mqe->un.sli4_params.word3;
8318         sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8319         sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8320         sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8321         sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8322         sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8323         sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8324         sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8325         sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8326         sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8327         sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8328         sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8329         sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8330         sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8331         sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8332         sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8333         sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8334         sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8335         sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8336         sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8337         sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8338
8339         /* Make sure that sge_supp_len can be handled by the driver */
8340         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8341                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8342
8343         return rc;
8344 }
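/*
 * The mailbox issue pattern above is the standard one for the config
 * path and is repeated in lpfc_get_sli4_parameters() below: poll while
 * interrupts are not yet enabled, otherwise block-wait using the
 * command's own timeout:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
 *					      lpfc_mbox_tmo_val(phba, mboxq));
 */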
8345
8346 /**
8347  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8348  * @phba: Pointer to HBA context object.
8349  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8350  *
8351  * This function is called in the SLI4 code path to read the port's
8352  * sli4 capabilities.
8353  *
8354  * This function may be called from any context that can block-wait
8355  * for the completion.  The expectation is that this routine is called
8356  * typically from probe_one or from the online routine.
8357  **/
8358 int
8359 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8360 {
8361         int rc;
8362         struct lpfc_mqe *mqe = &mboxq->u.mqe;
8363         struct lpfc_pc_sli4_params *sli4_params;
8364         uint32_t mbox_tmo;
8365         int length;
8366         struct lpfc_sli4_parameters *mbx_sli4_parameters;
8367
8368         /*
8369          * By default, the driver assumes the SLI4 port requires RPI
8370          * header postings.  The SLI4_PARAM response will correct this
8371          * assumption.
8372          */
8373         phba->sli4_hba.rpi_hdrs_in_use = 1;
8374
8375         /* Read the port's SLI4 Config Parameters */
8376         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8377                   sizeof(struct lpfc_sli4_cfg_mhdr));
8378         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8379                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8380                          length, LPFC_SLI4_MBX_EMBED);
8381         if (!phba->sli4_hba.intr_enable)
8382                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8383         else {
8384                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8385                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8386         }
8387         if (unlikely(rc))
8388                 return rc;
8389         sli4_params = &phba->sli4_hba.pc_sli4_params;
8390         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8391         sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8392         sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8393         sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8394         sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8395                                              mbx_sli4_parameters);
8396         sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8397                                              mbx_sli4_parameters);
8398         if (bf_get(cfg_phwq, mbx_sli4_parameters))
8399                 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8400         else
8401                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8402         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8403         sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8404         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8405         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8406         sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8407         sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8408         sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8409                                             mbx_sli4_parameters);
8410         sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8411                                            mbx_sli4_parameters);
8412         phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8413         phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8414
8415         /* Make sure that sge_supp_len can be handled by the driver */
8416         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8417                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8418
8419         return 0;
8420 }
8421
8422 /**
8423  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8424  * @pdev: pointer to PCI device
8425  * @pid: pointer to PCI device identifier
8426  *
8427  * This routine is to be called to attach a device with SLI-3 interface spec
8428  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8429  * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
8430  * information of the device and driver to see whether the driver states that it
8431  * can support this kind of device. If the match is successful, the driver core
8432  * invokes this routine. If this routine determines it can claim the HBA, it
8433  * does all the initialization that it needs to do to handle the HBA properly.
8434  *
8435  * Return code
8436  *      0 - driver can claim the device
8437  *      negative value - driver can not claim the device
8438  **/
8439 static int __devinit
8440 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8441 {
8442         struct lpfc_hba   *phba;
8443         struct lpfc_vport *vport = NULL;
8444         struct Scsi_Host  *shost = NULL;
8445         int error;
8446         uint32_t cfg_mode, intr_mode;
8447
8448         /* Allocate memory for HBA structure */
8449         phba = lpfc_hba_alloc(pdev);
8450         if (!phba)
8451                 return -ENOMEM;
8452
8453         /* Perform generic PCI device enabling operation */
8454         error = lpfc_enable_pci_dev(phba);
8455         if (error)
8456                 goto out_free_phba;
8457
8458         /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8459         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8460         if (error)
8461                 goto out_disable_pci_dev;
8462
8463         /* Set up SLI-3 specific device PCI memory space */
8464         error = lpfc_sli_pci_mem_setup(phba);
8465         if (error) {
8466                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8467                                 "1402 Failed to set up pci memory space.\n");
8468                 goto out_disable_pci_dev;
8469         }
8470
8471         /* Set up phase-1 common device driver resources */
8472         error = lpfc_setup_driver_resource_phase1(phba);
8473         if (error) {
8474                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8475                                 "1403 Failed to set up driver resource.\n");
8476                 goto out_unset_pci_mem_s3;
8477         }
8478
8479         /* Set up SLI-3 specific device driver resources */
8480         error = lpfc_sli_driver_resource_setup(phba);
8481         if (error) {
8482                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8483                                 "1404 Failed to set up driver resource.\n");
8484                 goto out_unset_pci_mem_s3;
8485         }
8486
8487         /* Initialize and populate the iocb list per host */
8488         error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8489         if (error) {
8490                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8491                                 "1405 Failed to initialize iocb list.\n");
8492                 goto out_unset_driver_resource_s3;
8493         }
8494
8495         /* Set up common device driver resources */
8496         error = lpfc_setup_driver_resource_phase2(phba);
8497         if (error) {
8498                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8499                                 "1406 Failed to set up driver resource.\n");
8500                 goto out_free_iocb_list;
8501         }
8502
8503         /* Get the default values for Model Name and Description */
8504         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8505
8506         /* Create SCSI host to the physical port */
8507         error = lpfc_create_shost(phba);
8508         if (error) {
8509                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8510                                 "1407 Failed to create scsi host.\n");
8511                 goto out_unset_driver_resource;
8512         }
8513
8514         /* Configure sysfs attributes */
8515         vport = phba->pport;
8516         error = lpfc_alloc_sysfs_attr(vport);
8517         if (error) {
8518                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8519                                 "1476 Failed to allocate sysfs attr\n");
8520                 goto out_destroy_shost;
8521         }
8522
8523         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8524         /* Now, try to enable the interrupt and bring up the device */
8525         cfg_mode = phba->cfg_use_msi;
8526         while (true) {
8527                 /* Put device to a known state before enabling interrupt */
8528                 lpfc_stop_port(phba);
8529                 /* Configure and enable interrupt */
8530                 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8531                 if (intr_mode == LPFC_INTR_ERROR) {
8532                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8533                                         "0431 Failed to enable interrupt.\n");
8534                         error = -ENODEV;
8535                         goto out_free_sysfs_attr;
8536                 }
8537                 /* SLI-3 HBA setup */
8538                 if (lpfc_sli_hba_setup(phba)) {
8539                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8540                                         "1477 Failed to set up hba\n");
8541                         error = -ENODEV;
8542                         goto out_remove_device;
8543                 }
8544
8545                 /* Wait 50ms for the interrupts of previous mailbox commands */
8546                 msleep(50);
8547                 /* Check active interrupts on message signaled interrupts */
8548                 if (intr_mode == 0 ||
8549                     phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8550                         /* Log the current active interrupt mode */
8551                         phba->intr_mode = intr_mode;
8552                         lpfc_log_intr_mode(phba, intr_mode);
8553                         break;
8554                 } else {
8555                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8556                                         "0447 Configure interrupt mode (%d) "
8557                                         "failed active interrupt test.\n",
8558                                         intr_mode);
8559                         /* Disable the current interrupt mode */
8560                         lpfc_sli_disable_intr(phba);
8561                         /* Try next level of interrupt mode */
8562                         cfg_mode = --intr_mode;
8563                 }
8564         }
8565
8566         /* Perform post initialization setup */
8567         lpfc_post_init_setup(phba);
8568
8569         /* Check if there are static vports to be created. */
8570         lpfc_create_static_vport(phba);
8571
8572         return 0;
8573
8574 out_remove_device:
8575         lpfc_unset_hba(phba);
8576 out_free_sysfs_attr:
8577         lpfc_free_sysfs_attr(vport);
8578 out_destroy_shost:
8579         lpfc_destroy_shost(phba);
8580 out_unset_driver_resource:
8581         lpfc_unset_driver_resource_phase2(phba);
8582 out_free_iocb_list:
8583         lpfc_free_iocb_list(phba);
8584 out_unset_driver_resource_s3:
8585         lpfc_sli_driver_resource_unset(phba);
8586 out_unset_pci_mem_s3:
8587         lpfc_sli_pci_mem_unset(phba);
8588 out_disable_pci_dev:
8589         lpfc_disable_pci_dev(phba);
8590         if (shost)
8591                 scsi_host_put(shost);
8592 out_free_phba:
8593         lpfc_hba_free(phba);
8594         return error;
8595 }
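/*
 * Bring-up sketch for the interrupt test loop in the probe routine
 * above: starting from cfg_use_msi (2 = MSI-X, 1 = MSI, 0 = INTx), a
 * mode is accepted only if it is INTx or the handler demonstrably fired
 * during HBA setup (sli_intr > LPFC_MSIX_VECTORS); otherwise the mode
 * is disabled and the loop retries with the next lower one.
 */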
8596
8597 /**
8598  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8599  * @pdev: pointer to PCI device
8600  *
8601  * This routine is to be called to detach a device with SLI-3 interface
8602  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8603  * removed from PCI bus, it performs all the necessary cleanup for the HBA
8604  * device to be removed from the PCI subsystem properly.
8605  **/
8606 static void __devexit
8607 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8608 {
8609         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
8610         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8611         struct lpfc_vport **vports;
8612         struct lpfc_hba   *phba = vport->phba;
8613         int i;
8614         int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8615
8616         spin_lock_irq(&phba->hbalock);
8617         vport->load_flag |= FC_UNLOADING;
8618         spin_unlock_irq(&phba->hbalock);
8619
8620         lpfc_free_sysfs_attr(vport);
8621
8622         /* Release all the vports against this physical port */
8623         vports = lpfc_create_vport_work_array(phba);
8624         if (vports != NULL)
8625                 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8626                         fc_vport_terminate(vports[i]->fc_vport);
8627         lpfc_destroy_vport_work_array(phba, vports);
8628
8629         /* Remove FC host and then SCSI host with the physical port */
8630         fc_remove_host(shost);
8631         scsi_remove_host(shost);
8632         lpfc_cleanup(vport);
8633
8634         /*
8635          * Bring down the SLI Layer. This step disables all interrupts,
8636          * clears the rings, discards all mailbox commands, and resets
8637          * the HBA.
8638          */
8639
8640         /* HBA interrupt will be disabled after this call */
8641         lpfc_sli_hba_down(phba);
8642         /* Stop kthread signal shall trigger work_done one more time */
8643         kthread_stop(phba->worker_thread);
8644         /* Final cleanup of txcmplq and reset the HBA */
8645         lpfc_sli_brdrestart(phba);
8646
8647         lpfc_stop_hba_timers(phba);
8648         spin_lock_irq(&phba->hbalock);
8649         list_del_init(&vport->listentry);
8650         spin_unlock_irq(&phba->hbalock);
8651
8652         lpfc_debugfs_terminate(vport);
8653
8654         /* Disable SR-IOV if enabled */
8655         if (phba->cfg_sriov_nr_virtfn)
8656                 pci_disable_sriov(pdev);
8657
8658         /* Disable interrupt */
8659         lpfc_sli_disable_intr(phba);
8660
8661         pci_set_drvdata(pdev, NULL);
8662         scsi_host_put(shost);
8663
8664         /*
8665          * Call scsi_free before mem_free since scsi bufs are released to their
8666          * corresponding pools here.
8667          */
8668         lpfc_scsi_free(phba);
8669         lpfc_mem_free_all(phba);
8670
8671         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8672                           phba->hbqslimp.virt, phba->hbqslimp.phys);
8673
8674         /* Free resources associated with SLI2 interface */
8675         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8676                           phba->slim2p.virt, phba->slim2p.phys);
8677
8678         /* unmap adapter SLIM and Control Registers */
8679         iounmap(phba->ctrl_regs_memmap_p);
8680         iounmap(phba->slim_memmap_p);
8681
8682         lpfc_hba_free(phba);
8683
8684         pci_release_selected_regions(pdev, bars);
8685         pci_disable_device(pdev);
8686 }
8687
8688 /**
8689  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8690  * @pdev: pointer to PCI device
8691  * @msg: power management message
8692  *
8693  * This routine is to be called from the kernel's PCI subsystem to support
8694  * system Power Management (PM) on a device with the SLI-3 interface spec.
8695  * When PM invokes this method, it quiesces the device by stopping the
8696  * driver's worker thread for the device, turning off the device's
8697  * interrupt and DMA, and bringing the device offline. Note that because
8698  * the driver implements only the minimum PM requirements for a power-aware
8699  * driver's suspend/resume support -- all possible PM messages (SUSPEND,
8700  * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND,
8701  * and the driver fully reinitializes its device during the resume() method
8702  * call -- the driver sets the device to the PCI_D3hot state in PCI config
8703  * space instead of setting it according to the @msg provided by the PM.
8704  *
8705  * Return code
8706  *      0 - driver suspended the device
8707  *      Error otherwise
8708  **/
8709 static int
8710 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8711 {
8712         struct Scsi_Host *shost = pci_get_drvdata(pdev);
8713         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8714
8715         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8716                         "0473 PCI device Power Management suspend.\n");
8717
8718         /* Bring down the device */
8719         lpfc_offline_prep(phba);
8720         lpfc_offline(phba);
8721         kthread_stop(phba->worker_thread);
8722
8723         /* Disable interrupt from device */
8724         lpfc_sli_disable_intr(phba);
8725
8726         /* Save device state to PCI config space */
8727         pci_save_state(pdev);
8728         pci_set_power_state(pdev, PCI_D3hot);
8729
8730         return 0;
8731 }
8732
8733 /**
8734  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8735  * @pdev: pointer to PCI device
8736  *
8737  * This routine is to be called from the kernel's PCI subsystem to support
8738  * system Power Management (PM) on a device with the SLI-3 interface spec.
8739  * When PM invokes this method, it restores the device's PCI config space
8740  * state and fully reinitializes the device and brings it online. Note that
8741  * because the driver implements only the minimum PM requirements for a
8742  * power-aware driver's suspend/resume support -- all possible PM messages
8743  * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
8744  * SUSPEND, and the driver fully reinitializes its device during the
8745  * resume() method call -- the device is set to PCI_D0 directly in PCI
8746  * config space before restoring the state.
8747  *
8748  * Return code
8749  *      0 - driver resumed the device
8750  *      Error otherwise
8751  **/
8752 static int
8753 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8754 {
8755         struct Scsi_Host *shost = pci_get_drvdata(pdev);
8756         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8757         uint32_t intr_mode;
8758         int error;
8759
8760         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8761                         "0452 PCI device Power Management resume.\n");
8762
8763         /* Restore device state from PCI config space */
8764         pci_set_power_state(pdev, PCI_D0);
8765         pci_restore_state(pdev);
8766
8767         /*
8768          * As the new kernel behavior of pci_restore_state() API call clears
8769          * device saved_state flag, need to save the restored state again.
8770          */
8771         pci_save_state(pdev);
8772
8773         if (pdev->is_busmaster)
8774                 pci_set_master(pdev);
8775
8776         /* Startup the kernel thread for this host adapter. */
8777         phba->worker_thread = kthread_run(lpfc_do_work, phba,
8778                                         "lpfc_worker_%d", phba->brd_no);
8779         if (IS_ERR(phba->worker_thread)) {
8780                 error = PTR_ERR(phba->worker_thread);
8781                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8782                                 "0434 PM resume failed to start worker "
8783                                 "thread: error=x%x.\n", error);
8784                 return error;
8785         }
8786
8787         /* Configure and enable interrupt */
8788         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8789         if (intr_mode == LPFC_INTR_ERROR) {
8790                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8791                                 "0430 PM resume Failed to enable interrupt\n");
8792                 return -EIO;
8793         } else
8794                 phba->intr_mode = intr_mode;
8795
8796         /* Restart HBA and bring it online */
8797         lpfc_sli_brdrestart(phba);
8798         lpfc_online(phba);
8799
8800         /* Log the current active interrupt mode */
8801         lpfc_log_intr_mode(phba, phba->intr_mode);
8802
8803         return 0;
8804 }
8805
8806 /**
8807  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8808  * @phba: pointer to lpfc hba data structure.
8809  *
8810  * This routine is called to prepare the SLI3 device for PCI slot recover. It
8811  * aborts all the outstanding SCSI I/Os to the pci device.
8812  **/
8813 static void
8814 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8815 {
8816         struct lpfc_sli *psli = &phba->sli;
8817         struct lpfc_sli_ring  *pring;
8818
8819         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8820                         "2723 PCI channel I/O abort preparing for recovery\n");
8821
8822         /*
8823          * There may be errored I/Os through the HBA; abort all I/Os on the
8824          * txcmplq and let the SCSI mid-layer retry them to recover.
8825          */
8826         pring = &psli->ring[psli->fcp_ring];
8827         lpfc_sli_abort_iocb_ring(phba, pring);
8828 }
8829
8830 /**
8831  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8832  * @phba: pointer to lpfc hba data structure.
8833  *
8834  * This routine is called to prepare the SLI3 device for PCI slot reset. It
8835  * disables the device interrupt and pci device, and aborts the internal FCP
8836  * pending I/Os.
8837  **/
8838 static void
8839 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8840 {
8841         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8842                         "2710 PCI channel disable preparing for reset\n");
8843
8844         /* Block any management I/Os to the device */
8845         lpfc_block_mgmt_io(phba);
8846
8847         /* Block all SCSI devices' I/Os on the host */
8848         lpfc_scsi_dev_block(phba);
8849
8850         /* stop all timers */
8851         lpfc_stop_hba_timers(phba);
8852
8853         /* Disable interrupt and pci device */
8854         lpfc_sli_disable_intr(phba);
8855         pci_disable_device(phba->pcidev);
8856
8857         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8858         lpfc_sli_flush_fcp_rings(phba);
8859 }
8860
8861 /**
8862  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8863  * @phba: pointer to lpfc hba data structure.
8864  *
8865  * This routine is called to prepare the SLI3 device for PCI slot permanently
8866  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8867  * pending I/Os.
8868  **/
8869 static void
8870 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8871 {
8872         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8873                         "2711 PCI channel permanent disable for failure\n");
8874         /* Block all SCSI devices' I/Os on the host */
8875         lpfc_scsi_dev_block(phba);
8876
8877         /* stop all timers */
8878         lpfc_stop_hba_timers(phba);
8879
8880         /* Clean up all driver's outstanding SCSI I/Os */
8881         lpfc_sli_flush_fcp_rings(phba);
8882 }
8883
8884 /**
8885  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8886  * @pdev: pointer to PCI device.
8887  * @state: the current PCI connection state.
8888  *
8889  * This routine is called from the PCI subsystem for I/O error handling to
8890  * device with SLI-3 interface spec. This function is called by the PCI
8891  * subsystem after a PCI bus error affecting this device has been detected.
8892  * When this function is invoked, it will need to stop all the I/Os and
8893  * interrupt(s) to the device. Once that is done, it will return
8894  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8895  * as desired.
8896  *
8897  * Return codes
8898  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8899  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8900  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8901  **/
8902 static pci_ers_result_t
8903 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8904 {
8905         struct Scsi_Host *shost = pci_get_drvdata(pdev);
8906         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8907
8908         switch (state) {
8909         case pci_channel_io_normal:
8910                 /* Non-fatal error, prepare for recovery */
8911                 lpfc_sli_prep_dev_for_recover(phba);
8912                 return PCI_ERS_RESULT_CAN_RECOVER;
8913         case pci_channel_io_frozen:
8914                 /* Fatal error, prepare for slot reset */
8915                 lpfc_sli_prep_dev_for_reset(phba);
8916                 return PCI_ERS_RESULT_NEED_RESET;
8917         case pci_channel_io_perm_failure:
8918                 /* Permanent failure, prepare for device down */
8919                 lpfc_sli_prep_dev_for_perm_failure(phba);
8920                 return PCI_ERS_RESULT_DISCONNECT;
8921         default:
8922                 /* Unknown state, prepare and request slot reset */
8923                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8924                                 "0472 Unknown PCI error state: x%x\n", state);
8925                 lpfc_sli_prep_dev_for_reset(phba);
8926                 return PCI_ERS_RESULT_NEED_RESET;
8927         }
8928 }
8929
8930 /**
8931  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8932  * @pdev: pointer to PCI device.
8933  *
8934  * This routine is called from the PCI subsystem for error handling to
8935  * device with SLI-3 interface spec. This is called after PCI bus has been
8936  * reset to restart the PCI card from scratch, as if from a cold-boot.
8937  * During the PCI subsystem error recovery, after driver returns
8938  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8939  * recovery and then call this routine before calling the .resume method
8940  * to recover the device. This function will initialize the HBA device,
8941  * enable the interrupt, but it will just put the HBA into an offline state
8942  * without passing any I/O traffic.
8943  *
8944  * Return codes
8945  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
8946  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8947  **/
8948 static pci_ers_result_t
8949 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8950 {
8951         struct Scsi_Host *shost = pci_get_drvdata(pdev);
8952         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8953         struct lpfc_sli *psli = &phba->sli;
8954         uint32_t intr_mode;
8955
8956         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8957         if (pci_enable_device_mem(pdev)) {
8958                 printk(KERN_ERR "lpfc: Cannot re-enable "
8959                         "PCI device after reset.\n");
8960                 return PCI_ERS_RESULT_DISCONNECT;
8961         }
8962
8963         pci_restore_state(pdev);
8964
8965         /*
8966          * As the new kernel behavior of pci_restore_state() API call clears
8967          * device saved_state flag, need to save the restored state again.
8968          */
8969         pci_save_state(pdev);
8970
8971         if (pdev->is_busmaster)
8972                 pci_set_master(pdev);
8973
8974         spin_lock_irq(&phba->hbalock);
8975         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8976         spin_unlock_irq(&phba->hbalock);
8977
8978         /* Configure and enable interrupt */
8979         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8980         if (intr_mode == LPFC_INTR_ERROR) {
8981                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8982                                 "0427 Cannot re-enable interrupt after "
8983                                 "slot reset.\n");
8984                 return PCI_ERS_RESULT_DISCONNECT;
8985         } else
8986                 phba->intr_mode = intr_mode;
8987
8988         /* Take device offline, it will perform cleanup */
8989         lpfc_offline_prep(phba);
8990         lpfc_offline(phba);
8991         lpfc_sli_brdrestart(phba);
8992
8993         /* Log the current active interrupt mode */
8994         lpfc_log_intr_mode(phba, phba->intr_mode);
8995
8996         return PCI_ERS_RESULT_RECOVERED;
8997 }
8998
8999 /**
9000  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9001  * @pdev: pointer to PCI device
9002  *
9003  * This routine is called from the PCI subsystem for error handling to device
9004  * with SLI-3 interface spec. It is called when kernel error recovery tells
9005  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9006  * error recovery. After this call, traffic can start to flow from this device
9007  * again.
9008  */
9009 static void
9010 lpfc_io_resume_s3(struct pci_dev *pdev)
9011 {
9012         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9013         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9014
9015         /* Bring device online, it will be no-op for non-fatal error resume */
9016         lpfc_online(phba);
9017
9018         /* Clean up Advanced Error Reporting (AER) if needed */
9019         if (phba->hba_flag & HBA_AER_ENABLED)
9020                 pci_cleanup_aer_uncorrect_error_status(pdev);
9021 }
9022
9023 /**
9024  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9025  * @phba: pointer to lpfc hba data structure.
9026  *
9027  * returns the number of ELS/CT IOCBs to reserve
9028  **/
9029 int
9030 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9031 {
9032         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9033
9034         if (phba->sli_rev == LPFC_SLI_REV4) {
9035                 if (max_xri <= 100)
9036                         return 10;
9037                 else if (max_xri <= 256)
9038                         return 25;
9039                 else if (max_xri <= 512)
9040                         return 50;
9041                 else if (max_xri <= 1024)
9042                         return 100;
9043                 else
9044                         return 150;
9045         } else
9046                 return 0;
9047 }
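
/*
 * A minimal userspace sketch (not part of the driver) restating the step
 * function above so the XRI-to-reservation tiers can be checked in
 * isolation. The tier boundaries are copied from the code above; the
 * helper name and sample values are hypothetical.
 */
#if 0
#include <stdio.h>

static int els_iocb_reserve(int max_xri)
{
	if (max_xri <= 100)
		return 10;
	if (max_xri <= 256)
		return 25;
	if (max_xri <= 512)
		return 50;
	if (max_xri <= 1024)
		return 100;
	return 150;
}

int main(void)
{
	const int samples[] = { 64, 256, 600, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("max_xri=%4d -> reserve %d ELS/CT IOCBs\n",
		       samples[i], els_iocb_reserve(samples[i]));
	return 0;
}
#endif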
9048
9049 /**
9050  * lpfc_write_firmware - attempt to write a firmware image to the port
9051  * @phba: pointer to lpfc hba data structure.
9052  * @fw: pointer to firmware image returned from request_firmware.
9053  *
9054  * Returns the number of bytes written if the write was successful,
9055  * a negative error value if there were errors, or 0 if the image
9056  * matches the firmware currently active on the port.
9057  **/
9058 int
9059 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9060 {
9061         char fwrev[32];
9062         struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
9063         struct list_head dma_buffer_list;
9064         int i, rc = 0;
9065         struct lpfc_dmabuf *dmabuf, *next;
9066         uint32_t offset = 0, temp_offset = 0;
9067
9068         INIT_LIST_HEAD(&dma_buffer_list);
9069         if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9070             (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9071              LPFC_FILE_TYPE_GROUP) ||
9072             (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9073             (be32_to_cpu(image->size) != fw->size)) {
9074                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9075                                 "3022 Invalid FW image found. "
9076                                 "Magic:%x Type:%x ID:%x\n",
9077                                 be32_to_cpu(image->magic_number),
9078                                 bf_get_be32(lpfc_grp_hdr_file_type, image),
9079                                 bf_get_be32(lpfc_grp_hdr_id, image));
9080                 return -EINVAL;
9081         }
9082         lpfc_decode_firmware_rev(phba, fwrev, 1);
9083         if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9084                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9085                                 "3023 Updating Firmware. Current Version:%s "
9086                                 "New Version:%s\n",
9087                                 fwrev, image->revision);
9088                 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9089                         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9090                                          GFP_KERNEL);
9091                         if (!dmabuf) {
9092                                 rc = -ENOMEM;
9093                                 goto out;
9094                         }
9095                         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9096                                                           SLI4_PAGE_SIZE,
9097                                                           &dmabuf->phys,
9098                                                           GFP_KERNEL);
9099                         if (!dmabuf->virt) {
9100                                 kfree(dmabuf);
9101                                 rc = -ENOMEM;
9102                                 goto out;
9103                         }
9104                         list_add_tail(&dmabuf->list, &dma_buffer_list);
9105                 }
9106                 while (offset < fw->size) {
9107                         temp_offset = offset;
9108                         list_for_each_entry(dmabuf, &dma_buffer_list, list) {
9109                                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
9110                                         memcpy(dmabuf->virt,
9111                                                fw->data + temp_offset,
9112                                                fw->size - temp_offset);
9113                                         temp_offset = fw->size;
9114                                         break;
9115                                 }
9116                                 memcpy(dmabuf->virt, fw->data + temp_offset,
9117                                        SLI4_PAGE_SIZE);
9118                                 temp_offset += SLI4_PAGE_SIZE;
9119                         }
9120                         rc = lpfc_wr_object(phba, &dma_buffer_list,
9121                                     (fw->size - offset), &offset);
9122                         if (rc) {
9123                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9124                                                 "3024 Firmware update failed. "
9125                                                 "%d\n", rc);
9126                                 goto out;
9127                         }
9128                 }
9129                 rc = offset;
9130         }
9131 out:
9132         list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9133                 list_del(&dmabuf->list);
9134                 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9135                                   dmabuf->virt, dmabuf->phys);
9136                 kfree(dmabuf);
9137         }
9138         return rc;
9139 }
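
/*
 * A minimal sketch (not compiled into the driver) of the chunking pattern
 * used above: the image is staged through a small fixed pool of page-sized
 * buffers, and the device-side write advances 'offset' until the whole
 * image has been consumed. PAGE_SZ, NBUFS and write_object() are
 * hypothetical stand-ins for SLI4_PAGE_SIZE, LPFC_MBX_WR_CONFIG_MAX_BDE
 * and lpfc_wr_object().
 */
#if 0
#include <stddef.h>
#include <string.h>

#define PAGE_SZ	4096
#define NBUFS	8

/* device write: consumes the staged buffers and advances *offset */
static int write_object(unsigned char (*bufs)[PAGE_SZ], size_t remaining,
			size_t *offset);

static int write_in_pages(const unsigned char *data, size_t size,
			  unsigned char bufs[NBUFS][PAGE_SZ])
{
	size_t offset = 0, tmp;
	int i;

	while (offset < size) {
		/* Refill the staging pool starting at the current offset. */
		for (i = 0, tmp = offset; i < NBUFS && tmp < size; i++) {
			size_t chunk = (size - tmp < PAGE_SZ) ?
						size - tmp : PAGE_SZ;

			memcpy(bufs[i], data + tmp, chunk);
			tmp += chunk;
		}
		/* The write consumes the pool and advances 'offset'. */
		if (write_object(bufs, size - offset, &offset) < 0)
			return -1;
	}
	return 0;
}
#endif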
9140
9141 /**
9142  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
9143  * @pdev: pointer to PCI device
9144  * @pid: pointer to PCI device identifier
9145  *
9146  * This routine is called from the kernel's PCI subsystem for a device with
9147  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
9148  * is presented on the PCI bus, the kernel PCI subsystem looks at the PCI
9149  * device-specific information to see whether the driver states that it
9150  * can support this kind of device. If the match is successful, the driver
9151  * core invokes this routine. If this routine determines it can claim the HBA,
9152  * it does all the initialization that it needs to do to handle the HBA
9153  * properly.
9154  *
9155  * Return code
9156  *      0 - driver can claim the device
9157  *      negative value - driver can not claim the device
9158  **/
9159 static int __devinit
9160 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9161 {
9162         struct lpfc_hba   *phba;
9163         struct lpfc_vport *vport = NULL;
9164         struct Scsi_Host  *shost = NULL;
9165         int error;
9166         uint32_t cfg_mode, intr_mode;
9167         int mcnt;
9168         int adjusted_fcp_eq_count;
9169         const struct firmware *fw;
9170         uint8_t file_name[16];
9171
9172         /* Allocate memory for HBA structure */
9173         phba = lpfc_hba_alloc(pdev);
9174         if (!phba)
9175                 return -ENOMEM;
9176
9177         /* Perform generic PCI device enabling operation */
9178         error = lpfc_enable_pci_dev(phba);
9179         if (error)
9180                 goto out_free_phba;
9181
9182         /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9183         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9184         if (error)
9185                 goto out_disable_pci_dev;
9186
9187         /* Set up SLI-4 specific device PCI memory space */
9188         error = lpfc_sli4_pci_mem_setup(phba);
9189         if (error) {
9190                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9191                                 "1410 Failed to set up pci memory space.\n");
9192                 goto out_disable_pci_dev;
9193         }
9194
9195         /* Set up phase-1 common device driver resources */
9196         error = lpfc_setup_driver_resource_phase1(phba);
9197         if (error) {
9198                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9199                                 "1411 Failed to set up driver resource.\n");
9200                 goto out_unset_pci_mem_s4;
9201         }
9202
9203         /* Set up SLI-4 Specific device driver resources */
9204         error = lpfc_sli4_driver_resource_setup(phba);
9205         if (error) {
9206                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9207                                 "1412 Failed to set up driver resource.\n");
9208                 goto out_unset_pci_mem_s4;
9209         }
9210
9211         /* Initialize and populate the iocb list per host */
9212
9213         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9214                         "2821 initialize iocb list %d.\n",
9215                         phba->cfg_iocb_cnt*1024);
9216         error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9217
9218         if (error) {
9219                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9220                                 "1413 Failed to initialize iocb list.\n");
9221                 goto out_unset_driver_resource_s4;
9222         }
9223
9224         INIT_LIST_HEAD(&phba->active_rrq_list);
9225         INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9226
9227         /* Set up common device driver resources */
9228         error = lpfc_setup_driver_resource_phase2(phba);
9229         if (error) {
9230                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9231                                 "1414 Failed to set up driver resource.\n");
9232                 goto out_free_iocb_list;
9233         }
9234
9235         /* Get the default values for Model Name and Description */
9236         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9237
9238         /* Create SCSI host to the physical port */
9239         error = lpfc_create_shost(phba);
9240         if (error) {
9241                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9242                                 "1415 Failed to create scsi host.\n");
9243                 goto out_unset_driver_resource;
9244         }
9245
9246         /* Configure sysfs attributes */
9247         vport = phba->pport;
9248         error = lpfc_alloc_sysfs_attr(vport);
9249         if (error) {
9250                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9251                                 "1416 Failed to allocate sysfs attr\n");
9252                 goto out_destroy_shost;
9253         }
9254
9255         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9256         /* Now, trying to enable interrupt and bring up the device */
9257         cfg_mode = phba->cfg_use_msi;
9258         while (true) {
9259                 /* Put device to a known state before enabling interrupt */
9260                 lpfc_stop_port(phba);
9261                 /* Configure and enable interrupt */
9262                 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9263                 if (intr_mode == LPFC_INTR_ERROR) {
9264                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9265                                         "0426 Failed to enable interrupt.\n");
9266                         error = -ENODEV;
9267                         goto out_free_sysfs_attr;
9268                 }
9269                 /* Default to single EQ for non-MSI-X */
9270                 if (phba->intr_type != MSIX)
9271                         adjusted_fcp_eq_count = 0;
9272                 else if (phba->sli4_hba.msix_vec_nr <
9273                                         phba->cfg_fcp_eq_count + 1)
9274                         adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9275                 else
9276                         adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9277                 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9278                 /* Set up SLI-4 HBA */
9279                 if (lpfc_sli4_hba_setup(phba)) {
9280                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9281                                         "1421 Failed to set up hba\n");
9282                         error = -ENODEV;
9283                         goto out_disable_intr;
9284                 }
9285
9286                 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
9287                 if (intr_mode != 0)
9288                         mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9289                                                             LPFC_ACT_INTR_CNT);
9290
9291                 /* Check active interrupts received only for MSI/MSI-X */
9292                 if (intr_mode == 0 ||
9293                     phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9294                         /* Log the current active interrupt mode */
9295                         phba->intr_mode = intr_mode;
9296                         lpfc_log_intr_mode(phba, intr_mode);
9297                         break;
9298                 }
9299                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9300                                 "0451 Configure interrupt mode (%d) "
9301                                 "failed active interrupt test.\n",
9302                                 intr_mode);
9303                 /* Unset the previous SLI-4 HBA setup. */
9304                 /*
9305                  * TODO:  Is this operation compatible with IF TYPE 2
9306                  * devices?  All port state is deleted and cleared.
9307                  */
9308                 lpfc_sli4_unset_hba(phba);
9309                 /* Try next level of interrupt mode */
9310                 cfg_mode = --intr_mode;
9311         }
9312
9313         /* Perform post initialization setup */
9314         lpfc_post_init_setup(phba);
9315
9316         /* check for firmware upgrade or downgrade (if_type 2 only) */
9317         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9318             LPFC_SLI_INTF_IF_TYPE_2) {
9319                 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9320                 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9321                 if (!error) {
9322                         lpfc_write_firmware(phba, fw);
9323                         release_firmware(fw);
9324                 }
9325         }
9326
9327         /* Check if there are static vports to be created. */
9328         lpfc_create_static_vport(phba);
9329         return 0;
9330
9331 out_disable_intr:
9332         lpfc_sli4_disable_intr(phba);
9333 out_free_sysfs_attr:
9334         lpfc_free_sysfs_attr(vport);
9335 out_destroy_shost:
9336         lpfc_destroy_shost(phba);
9337 out_unset_driver_resource:
9338         lpfc_unset_driver_resource_phase2(phba);
9339 out_free_iocb_list:
9340         lpfc_free_iocb_list(phba);
9341 out_unset_driver_resource_s4:
9342         lpfc_sli4_driver_resource_unset(phba);
9343 out_unset_pci_mem_s4:
9344         lpfc_sli4_pci_mem_unset(phba);
9345 out_disable_pci_dev:
9346         lpfc_disable_pci_dev(phba);
9347         if (shost)
9348                 scsi_host_put(shost);
9349 out_free_phba:
9350         lpfc_hba_free(phba);
9351         return error;
9352 }
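
/*
 * A minimal sketch (not compiled into the driver) of the interrupt bring-up
 * strategy in the probe loop above: start at the requested mode and fall
 * back one level at a time (MSI-X -> MSI -> INTx) whenever the
 * active-interrupt test fails; INTx (mode 0) is accepted without a test.
 * try_enable(), passes_intr_test() and unset_hba() are hypothetical
 * stand-ins for lpfc_sli4_enable_intr(), the NOP-mailbox interrupt check
 * and lpfc_sli4_unset_hba().
 */
#if 0
static int try_enable(int cfg_mode);	/* returns the mode in use, <0 on error */
static int passes_intr_test(int mode);
static void unset_hba(void);

static int bring_up_interrupts(int cfg_mode)
{
	int mode;

	for (;;) {
		mode = try_enable(cfg_mode);
		if (mode < 0)
			return mode;		/* hard failure */
		if (mode == 0 || passes_intr_test(mode))
			return mode;		/* mode accepted */
		unset_hba();			/* undo the partial HBA setup */
		cfg_mode = mode - 1;		/* fall back one level */
	}
}
#endif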
9353
9354 /**
9355  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9356  * @pdev: pointer to PCI device
9357  *
9358  * This routine is called from the kernel's PCI subsystem for a device with
9359  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
9360  * spec is removed from the PCI bus, it performs all the cleanup necessary
9361  * for the HBA device to be properly removed from the PCI subsystem.
9362  **/
9363 static void __devexit
9364 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9365 {
9366         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9367         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9368         struct lpfc_vport **vports;
9369         struct lpfc_hba *phba = vport->phba;
9370         int i;
9371
9372         /* Mark the device unloading flag */
9373         spin_lock_irq(&phba->hbalock);
9374         vport->load_flag |= FC_UNLOADING;
9375         spin_unlock_irq(&phba->hbalock);
9376
9377         /* Free the HBA sysfs attributes */
9378         lpfc_free_sysfs_attr(vport);
9379
9380         /* Release all the vports against this physical port */
9381         vports = lpfc_create_vport_work_array(phba);
9382         if (vports != NULL)
9383                 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9384                         fc_vport_terminate(vports[i]->fc_vport);
9385         lpfc_destroy_vport_work_array(phba, vports);
9386
9387         /* Remove FC host and then SCSI host with the physical port */
9388         fc_remove_host(shost);
9389         scsi_remove_host(shost);
9390
9391         /* Perform cleanup on the physical port */
9392         lpfc_cleanup(vport);
9393
9394         /*
9395          * Bring down the SLI Layer. This step disables all interrupts,
9396          * clears the rings, discards all mailbox commands, and resets
9397          * the HBA FCoE function.
9398          */
9399         lpfc_debugfs_terminate(vport);
9400         lpfc_sli4_hba_unset(phba);
9401
9402         spin_lock_irq(&phba->hbalock);
9403         list_del_init(&vport->listentry);
9404         spin_unlock_irq(&phba->hbalock);
9405
9406         /* Perform scsi free before driver resource_unset since scsi
9407          * buffers are released to their corresponding pools here.
9408          */
9409         lpfc_scsi_free(phba);
9410         lpfc_sli4_driver_resource_unset(phba);
9411
9412         /* Unmap adapter Control and Doorbell registers */
9413         lpfc_sli4_pci_mem_unset(phba);
9414
9415         /* Release PCI resources and disable device's PCI function */
9416         scsi_host_put(shost);
9417         lpfc_disable_pci_dev(phba);
9418
9419         /* Finally, free the driver's device data structure */
9420         lpfc_hba_free(phba);
9421
9422         return;
9423 }
9424
9425 /**
9426  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9427  * @pdev: pointer to PCI device
9428  * @msg: power management message
9429  *
9430  * This routine is called from the kernel's PCI subsystem to support system
9431  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
9432  * invokes this method, it quiesces the device by stopping the driver's
9433  * worker thread for the device, turning off the device's interrupt and DMA,
9434  * and bringing the device offline. Note that the driver implements only the
9435  * minimum PM requirements of a power-aware driver for suspend/resume: all
9436  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
9437  * method are treated as SUSPEND, and the driver fully reinitializes its
9438  * device during the resume() method call. Consequently, the driver sets the
9439  * device to the PCI_D3hot state in PCI config space instead of setting it
9440  * according to the @msg provided by the PM core.
9441  *
9442  * Return code
9443  *      0 - driver suspended the device
9444  *      Error otherwise
9445  **/
9446 static int
9447 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9448 {
9449         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9450         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9451
9452         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9453                         "2843 PCI device Power Management suspend.\n");
9454
9455         /* Bring down the device */
9456         lpfc_offline_prep(phba);
9457         lpfc_offline(phba);
9458         kthread_stop(phba->worker_thread);
9459
9460         /* Disable interrupt from device */
9461         lpfc_sli4_disable_intr(phba);
9462         lpfc_sli4_queue_destroy(phba);
9463
9464         /* Save device state to PCI config space */
9465         pci_save_state(pdev);
9466         pci_set_power_state(pdev, PCI_D3hot);
9467
9468         return 0;
9469 }
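
/*
 * A minimal sketch (not compiled into the driver) of the suspend-side
 * ordering used above. Because every PM message is treated as a full
 * SUSPEND and resume performs a full reinit, config space can go straight
 * to D3hot. quiesce_io(), stop_worker() and disable_intr_and_queues() are
 * hypothetical stand-ins for the offline, kthread_stop() and
 * interrupt/queue teardown steps above.
 */
#if 0
static void quiesce_io(void);
static void stop_worker(void);
static void disable_intr_and_queues(void);

static int pm_suspend_sketch(struct pci_dev *pdev)
{
	quiesce_io();			/* offline_prep + offline */
	stop_worker();			/* kthread_stop(worker_thread) */
	disable_intr_and_queues();	/* IRQs and queues torn down */
	pci_save_state(pdev);		/* must precede the power change */
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
#endif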
9470
9471 /**
9472  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9473  * @pdev: pointer to PCI device
9474  *
9475  * This routine is called from the kernel's PCI subsystem to support system
9476  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
9477  * invokes this method, it restores the device's PCI config space state,
9478  * fully reinitializes the device, and brings it online. Note that the
9479  * driver implements only the minimum PM requirements of a power-aware
9480  * driver for suspend/resume: all possible PM messages (SUSPEND, HIBERNATE,
9481  * FREEZE) passed to the suspend() method are treated as SUSPEND, and the
9482  * driver fully reinitializes its device during the resume() method call.
9483  * Consequently, the device is set to PCI_D0 directly in PCI config space
9484  * before its state is restored.
9485  *
9486  *      0 - driver resumed the device
9487  *      0 - driver suspended the device
9488  *      Error otherwise
9489  **/
9490 static int
9491 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9492 {
9493         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9494         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9495         uint32_t intr_mode;
9496         int error;
9497
9498         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9499                         "0292 PCI device Power Management resume.\n");
9500
9501         /* Restore device state from PCI config space */
9502         pci_set_power_state(pdev, PCI_D0);
9503         pci_restore_state(pdev);
9504
9505         /*
9506          * As the new kernel behavior of pci_restore_state() API call clears
9507          * device saved_state flag, need to save the restored state again.
9508          */
9509         pci_save_state(pdev);
9510
9511         if (pdev->is_busmaster)
9512                 pci_set_master(pdev);
9513
9514         /* Start the kernel thread for this host adapter. */
9515         phba->worker_thread = kthread_run(lpfc_do_work, phba,
9516                                         "lpfc_worker_%d", phba->brd_no);
9517         if (IS_ERR(phba->worker_thread)) {
9518                 error = PTR_ERR(phba->worker_thread);
9519                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9520                                 "0293 PM resume failed to start worker "
9521                                 "thread: error=x%x.\n", error);
9522                 return error;
9523         }
9524
9525         /* Configure and enable interrupt */
9526         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9527         if (intr_mode == LPFC_INTR_ERROR) {
9528                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9529                                 "0294 PM resume Failed to enable interrupt\n");
9530                 return -EIO;
9531         } else
9532                 phba->intr_mode = intr_mode;
9533
9534         /* Restart HBA and bring it online */
9535         lpfc_sli_brdrestart(phba);
9536         lpfc_online(phba);
9537
9538         /* Log the current active interrupt mode */
9539         lpfc_log_intr_mode(phba, phba->intr_mode);
9540
9541         return 0;
9542 }
9543
9544 /**
9545  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for PCI slot recovery
9546  * @phba: pointer to lpfc hba data structure.
9547  *
9548  * This routine is called to prepare the SLI4 device for PCI slot recovery. It
9549  * aborts all the outstanding SCSI I/Os to the pci device.
9550  **/
9551 static void
9552 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9553 {
9554         struct lpfc_sli *psli = &phba->sli;
9555         struct lpfc_sli_ring  *pring;
9556
9557         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9558                         "2828 PCI channel I/O abort preparing for recovery\n");
9559         /*
9560          * There may be errored I/Os through the HBA; abort all I/Os on the
9561          * txcmplq and let the SCSI mid-layer retry them to recover.
9562          */
9563         pring = &psli->ring[psli->fcp_ring];
9564         lpfc_sli_abort_iocb_ring(phba, pring);
9565 }
9566
9567 /**
9568  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9569  * @phba: pointer to lpfc hba data structure.
9570  *
9571  * This routine is called to prepare the SLI4 device for PCI slot reset. It
9572  * disables the device interrupt and pci device, and aborts the internal FCP
9573  * pending I/Os.
9574  **/
9575 static void
9576 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9577 {
9578         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9579                         "2826 PCI channel disable preparing for reset\n");
9580
9581         /* Block any management I/Os to the device */
9582         lpfc_block_mgmt_io(phba);
9583
9584         /* Block all SCSI devices' I/Os on the host */
9585         lpfc_scsi_dev_block(phba);
9586
9587         /* stop all timers */
9588         lpfc_stop_hba_timers(phba);
9589
9590         /* Disable interrupt and pci device */
9591         lpfc_sli4_disable_intr(phba);
9592         lpfc_sli4_queue_destroy(phba);
9593         pci_disable_device(phba->pcidev);
9594
9595         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9596         lpfc_sli_flush_fcp_rings(phba);
9597 }
9598
9599 /**
9600  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9601  * @phba: pointer to lpfc hba data structure.
9602  *
9603  * This routine is called to prepare the SLI4 device for permanent PCI slot
9604  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9605  * pending I/Os.
9606  **/
9607 static void
9608 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9609 {
9610         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9611                         "2827 PCI channel permanent disable for failure\n");
9612
9613         /* Block all SCSI devices' I/Os on the host */
9614         lpfc_scsi_dev_block(phba);
9615
9616         /* stop all timers */
9617         lpfc_stop_hba_timers(phba);
9618
9619         /* Clean up all driver's outstanding SCSI I/Os */
9620         lpfc_sli_flush_fcp_rings(phba);
9621 }
9622
9623 /**
9624  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9625  * @pdev: pointer to PCI device.
9626  * @state: the current PCI connection state.
9627  *
9628  * This routine is called from the PCI subsystem for error handling on a device
9629  * with the SLI-4 interface spec. This function is called by the PCI subsystem
9630  * after a PCI bus error affecting this device has been detected. When this
9631  * function is invoked, it will need to stop all the I/Os and interrupt(s)
9632  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9633  * for the PCI subsystem to perform proper recovery as desired.
9634  *
9635  * Return codes
9636  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9637  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9638  **/
9639 static pci_ers_result_t
9640 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9641 {
9642         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9643         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9644
9645         switch (state) {
9646         case pci_channel_io_normal:
9647                 /* Non-fatal error, prepare for recovery */
9648                 lpfc_sli4_prep_dev_for_recover(phba);
9649                 return PCI_ERS_RESULT_CAN_RECOVER;
9650         case pci_channel_io_frozen:
9651                 /* Fatal error, prepare for slot reset */
9652                 lpfc_sli4_prep_dev_for_reset(phba);
9653                 return PCI_ERS_RESULT_NEED_RESET;
9654         case pci_channel_io_perm_failure:
9655                 /* Permanent failure, prepare for device down */
9656                 lpfc_sli4_prep_dev_for_perm_failure(phba);
9657                 return PCI_ERS_RESULT_DISCONNECT;
9658         default:
9659                 /* Unknown state, prepare and request slot reset */
9660                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9661                                 "2825 Unknown PCI error state: x%x\n", state);
9662                 lpfc_sli4_prep_dev_for_reset(phba);
9663                 return PCI_ERS_RESULT_NEED_RESET;
9664         }
9665 }
9666
9667 /**
9668  * lpfc_io_slot_reset_s4 - Method for restarting a PCI SLI-4 device from scratch
9669  * @pdev: pointer to PCI device.
9670  *
9671  * This routine is called from the PCI subsystem for error handling on a device
9672  * with the SLI-4 interface spec. It is called after the PCI bus has been reset to
9673  * restart the PCI card from scratch, as if from a cold-boot. During the
9674  * PCI subsystem error recovery, after the driver returns
9675  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9676  * recovery and then call this routine before calling the .resume method to
9677  * recover the device. This function will initialize the HBA device, enable
9678  * the interrupt, but it will just put the HBA to offline state without
9679  * passing any I/O traffic.
9680  *
9681  * Return codes
9682  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
9683  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9684  */
9685 static pci_ers_result_t
9686 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9687 {
9688         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9689         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9690         struct lpfc_sli *psli = &phba->sli;
9691         uint32_t intr_mode;
9692
9693         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9694         if (pci_enable_device_mem(pdev)) {
9695                 printk(KERN_ERR "lpfc: Cannot re-enable "
9696                         "PCI device after reset.\n");
9697                 return PCI_ERS_RESULT_DISCONNECT;
9698         }
9699
9700         pci_restore_state(pdev);
9701
9702         /*
9703          * As the new kernel behavior of pci_restore_state() API call clears
9704          * device saved_state flag, need to save the restored state again.
9705          */
9706         pci_save_state(pdev);
9707
9708         if (pdev->is_busmaster)
9709                 pci_set_master(pdev);
9710
9711         spin_lock_irq(&phba->hbalock);
9712         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9713         spin_unlock_irq(&phba->hbalock);
9714
9715         /* Configure and enable interrupt */
9716         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9717         if (intr_mode == LPFC_INTR_ERROR) {
9718                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9719                                 "2824 Cannot re-enable interrupt after "
9720                                 "slot reset.\n");
9721                 return PCI_ERS_RESULT_DISCONNECT;
9722         } else
9723                 phba->intr_mode = intr_mode;
9724
9725         /* Log the current active interrupt mode */
9726         lpfc_log_intr_mode(phba, phba->intr_mode);
9727
9728         return PCI_ERS_RESULT_RECOVERED;
9729 }
9730
9731 /**
9732  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9733  * @pdev: pointer to PCI device
9734  *
9735  * This routine is called from the PCI subsystem for error handling on a device
9736  * with the SLI-4 interface spec. It is called when kernel error recovery tells
9737  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9738  * error recovery. After this call, traffic can start to flow from this device
9739  * again.
9740  **/
9741 static void
9742 lpfc_io_resume_s4(struct pci_dev *pdev)
9743 {
9744         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9745         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9746
9747         /*
9748          * In the slot-reset case, the function reset is performed through
9749          * a mailbox command, which needs DMA to be enabled, so the reset
9750          * has to be deferred to the io resume phase. Taking the device
9751          * offline performs the necessary cleanup.
9752          */
9753         if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9754                 /* Perform device reset */
9755                 lpfc_offline_prep(phba);
9756                 lpfc_offline(phba);
9757                 lpfc_sli_brdrestart(phba);
9758                 /* Bring the device back online */
9759                 lpfc_online(phba);
9760         }
9761
9762         /* Clean up Advanced Error Reporting (AER) if needed */
9763         if (phba->hba_flag & HBA_AER_ENABLED)
9764                 pci_cleanup_aer_uncorrect_error_status(pdev);
9765 }
9766
9767 /**
9768  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9769  * @pdev: pointer to PCI device
9770  * @pid: pointer to PCI device identifier
9771  *
9772  * This routine is to be registered to the kernel's PCI subsystem. When an
9773  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9774  * at the PCI device-specific information of the device to see whether the
9775  * driver states that it can support this kind of device. If the match is
9776  * successful, the driver core invokes this routine. This routine dispatches
9777  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9778  * do all the initialization that it needs to do to handle the HBA device
9779  * properly.
9780  *
9781  * Return code
9782  *      0 - driver can claim the device
9783  *      negative value - driver can not claim the device
9784  **/
9785 static int __devinit
9786 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9787 {
9788         int rc;
9789         struct lpfc_sli_intf intf;
9790
9791         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9792                 return -ENODEV;
9793
9794         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9795             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9796                 rc = lpfc_pci_probe_one_s4(pdev, pid);
9797         else
9798                 rc = lpfc_pci_probe_one_s3(pdev, pid);
9799
9800         return rc;
9801 }
9802
9803 /**
9804  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9805  * @pdev: pointer to PCI device
9806  *
9807  * This routine is to be registered to the kernel's PCI subsystem. When an
9808  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9809  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9810  * remove routine, which will perform all the necessary cleanup for the
9811  * device to be removed from the PCI subsystem properly.
9812  **/
9813 static void __devexit
9814 lpfc_pci_remove_one(struct pci_dev *pdev)
9815 {
9816         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9817         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9818
9819         switch (phba->pci_dev_grp) {
9820         case LPFC_PCI_DEV_LP:
9821                 lpfc_pci_remove_one_s3(pdev);
9822                 break;
9823         case LPFC_PCI_DEV_OC:
9824                 lpfc_pci_remove_one_s4(pdev);
9825                 break;
9826         default:
9827                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9828                                 "1424 Invalid PCI device group: 0x%x\n",
9829                                 phba->pci_dev_grp);
9830                 break;
9831         }
9832         return;
9833 }
9834
9835 /**
9836  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9837  * @pdev: pointer to PCI device
9838  * @msg: power management message
9839  *
9840  * This routine is to be registered to the kernel's PCI subsystem to support
9841  * system Power Management (PM). When PM invokes this method, it dispatches
9842  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9843  * suspend the device.
9844  *
9845  * Return code
9846  *      0 - driver suspended the device
9847  *      Error otherwise
9848  **/
9849 static int
9850 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9851 {
9852         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9853         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9854         int rc = -ENODEV;
9855
9856         switch (phba->pci_dev_grp) {
9857         case LPFC_PCI_DEV_LP:
9858                 rc = lpfc_pci_suspend_one_s3(pdev, msg);
9859                 break;
9860         case LPFC_PCI_DEV_OC:
9861                 rc = lpfc_pci_suspend_one_s4(pdev, msg);
9862                 break;
9863         default:
9864                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9865                                 "1425 Invalid PCI device group: 0x%x\n",
9866                                 phba->pci_dev_grp);
9867                 break;
9868         }
9869         return rc;
9870 }
9871
9872 /**
9873  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9874  * @pdev: pointer to PCI device
9875  *
9876  * This routine is to be registered to the kernel's PCI subsystem to support
9877  * system Power Management (PM). When PM invokes this method, it dispatches
9878  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9879  * resume the device.
9880  *
9881  * Return code
9882  *      0 - driver resumed the device
9883  *      Error otherwise
9884  **/
9885 static int
9886 lpfc_pci_resume_one(struct pci_dev *pdev)
9887 {
9888         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9889         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9890         int rc = -ENODEV;
9891
9892         switch (phba->pci_dev_grp) {
9893         case LPFC_PCI_DEV_LP:
9894                 rc = lpfc_pci_resume_one_s3(pdev);
9895                 break;
9896         case LPFC_PCI_DEV_OC:
9897                 rc = lpfc_pci_resume_one_s4(pdev);
9898                 break;
9899         default:
9900                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9901                                 "1426 Invalid PCI device group: 0x%x\n",
9902                                 phba->pci_dev_grp);
9903                 break;
9904         }
9905         return rc;
9906 }
9907
9908 /**
9909  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9910  * @pdev: pointer to PCI device.
9911  * @state: the current PCI connection state.
9912  *
9913  * This routine is registered to the PCI subsystem for error handling. This
9914  * function is called by the PCI subsystem after a PCI bus error affecting
9915  * this device has been detected. When this routine is invoked, it dispatches
9916  * the action to the proper SLI-3 or SLI-4 device error detected handling
9917  * routine, which will perform the proper error detected operation.
9918  *
9919  * Return codes
9920  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9921  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9922  **/
9923 static pci_ers_result_t
9924 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9925 {
9926         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9927         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9928         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9929
9930         switch (phba->pci_dev_grp) {
9931         case LPFC_PCI_DEV_LP:
9932                 rc = lpfc_io_error_detected_s3(pdev, state);
9933                 break;
9934         case LPFC_PCI_DEV_OC:
9935                 rc = lpfc_io_error_detected_s4(pdev, state);
9936                 break;
9937         default:
9938                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9939                                 "1427 Invalid PCI device group: 0x%x\n",
9940                                 phba->pci_dev_grp);
9941                 break;
9942         }
9943         return rc;
9944 }
9945
9946 /**
9947  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
9948  * @pdev: pointer to PCI device.
9949  *
9950  * This routine is registered to the PCI subsystem for error handling. This
9951  * function is called after PCI bus has been reset to restart the PCI card
9952  * from scratch, as if from a cold-boot. When this routine is invoked, it
9953  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9954  * routine, which will perform the proper device reset.
9955  *
9956  * Return codes
9957  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
9958  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9959  **/
9960 static pci_ers_result_t
9961 lpfc_io_slot_reset(struct pci_dev *pdev)
9962 {
9963         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9964         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9965         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9966
9967         switch (phba->pci_dev_grp) {
9968         case LPFC_PCI_DEV_LP:
9969                 rc = lpfc_io_slot_reset_s3(pdev);
9970                 break;
9971         case LPFC_PCI_DEV_OC:
9972                 rc = lpfc_io_slot_reset_s4(pdev);
9973                 break;
9974         default:
9975                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9976                                 "1428 Invalid PCI device group: 0x%x\n",
9977                                 phba->pci_dev_grp);
9978                 break;
9979         }
9980         return rc;
9981 }
9982
9983 /**
9984  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
9985  * @pdev: pointer to PCI device
9986  *
9987  * This routine is registered to the PCI subsystem for error handling. It
9988  * is called when kernel error recovery tells the lpfc driver that it is
9989  * OK to resume normal PCI operation after PCI bus error recovery. When
9990  * this routine is invoked, it dispatches the action to the proper SLI-3
9991  * or SLI-4 device io_resume routine, which will resume the device operation.
9992  **/
9993 static void
9994 lpfc_io_resume(struct pci_dev *pdev)
9995 {
9996         struct Scsi_Host *shost = pci_get_drvdata(pdev);
9997         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9998
9999         switch (phba->pci_dev_grp) {
10000         case LPFC_PCI_DEV_LP:
10001                 lpfc_io_resume_s3(pdev);
10002                 break;
10003         case LPFC_PCI_DEV_OC:
10004                 lpfc_io_resume_s4(pdev);
10005                 break;
10006         default:
10007                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10008                                 "1429 Invalid PCI device group: 0x%x\n",
10009                                 phba->pci_dev_grp);
10010                 break;
10011         }
10012         return;
10013 }
10014
10015 static struct pci_device_id lpfc_id_table[] = {
10016         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10017                 PCI_ANY_ID, PCI_ANY_ID, },
10018         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
10019                 PCI_ANY_ID, PCI_ANY_ID, },
10020         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
10021                 PCI_ANY_ID, PCI_ANY_ID, },
10022         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
10023                 PCI_ANY_ID, PCI_ANY_ID, },
10024         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
10025                 PCI_ANY_ID, PCI_ANY_ID, },
10026         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
10027                 PCI_ANY_ID, PCI_ANY_ID, },
10028         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
10029                 PCI_ANY_ID, PCI_ANY_ID, },
10030         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
10031                 PCI_ANY_ID, PCI_ANY_ID, },
10032         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
10033                 PCI_ANY_ID, PCI_ANY_ID, },
10034         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
10035                 PCI_ANY_ID, PCI_ANY_ID, },
10036         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
10037                 PCI_ANY_ID, PCI_ANY_ID, },
10038         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
10039                 PCI_ANY_ID, PCI_ANY_ID, },
10040         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
10041                 PCI_ANY_ID, PCI_ANY_ID, },
10042         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
10043                 PCI_ANY_ID, PCI_ANY_ID, },
10044         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
10045                 PCI_ANY_ID, PCI_ANY_ID, },
10046         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
10047                 PCI_ANY_ID, PCI_ANY_ID, },
10048         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
10049                 PCI_ANY_ID, PCI_ANY_ID, },
10050         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
10051                 PCI_ANY_ID, PCI_ANY_ID, },
10052         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
10053                 PCI_ANY_ID, PCI_ANY_ID, },
10054         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
10055                 PCI_ANY_ID, PCI_ANY_ID, },
10056         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
10057                 PCI_ANY_ID, PCI_ANY_ID, },
10058         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
10059                 PCI_ANY_ID, PCI_ANY_ID, },
10060         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
10061                 PCI_ANY_ID, PCI_ANY_ID, },
10062         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
10063                 PCI_ANY_ID, PCI_ANY_ID, },
10064         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
10065                 PCI_ANY_ID, PCI_ANY_ID, },
10066         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
10067                 PCI_ANY_ID, PCI_ANY_ID, },
10068         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
10069                 PCI_ANY_ID, PCI_ANY_ID, },
10070         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
10071                 PCI_ANY_ID, PCI_ANY_ID, },
10072         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
10073                 PCI_ANY_ID, PCI_ANY_ID, },
10074         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
10075                 PCI_ANY_ID, PCI_ANY_ID, },
10076         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
10077                 PCI_ANY_ID, PCI_ANY_ID, },
10078         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
10079                 PCI_ANY_ID, PCI_ANY_ID, },
10080         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
10081                 PCI_ANY_ID, PCI_ANY_ID, },
10082         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
10083                 PCI_ANY_ID, PCI_ANY_ID, },
10084         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
10085                 PCI_ANY_ID, PCI_ANY_ID, },
10086         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
10087                 PCI_ANY_ID, PCI_ANY_ID, },
10088         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
10089                 PCI_ANY_ID, PCI_ANY_ID, },
10090         {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
10091                 PCI_ANY_ID, PCI_ANY_ID, },
10092         {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
10093                 PCI_ANY_ID, PCI_ANY_ID, },
10094         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
10095                 PCI_ANY_ID, PCI_ANY_ID, },
10096         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
10097                 PCI_ANY_ID, PCI_ANY_ID, },
10098         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
10099                 PCI_ANY_ID, PCI_ANY_ID, },
10100         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
10101                 PCI_ANY_ID, PCI_ANY_ID, },
10102         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
10103                 PCI_ANY_ID, PCI_ANY_ID, },
10104         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
10105                 PCI_ANY_ID, PCI_ANY_ID, },
10106         { 0 }
10107 };
10108
10109 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
10110
10111 static struct pci_error_handlers lpfc_err_handler = {
10112         .error_detected = lpfc_io_error_detected,
10113         .slot_reset = lpfc_io_slot_reset,
10114         .resume = lpfc_io_resume,
10115 };
10116
10117 static struct pci_driver lpfc_driver = {
10118         .name           = LPFC_DRIVER_NAME,
10119         .id_table       = lpfc_id_table,
10120         .probe          = lpfc_pci_probe_one,
10121         .remove         = __devexit_p(lpfc_pci_remove_one),
10122         .suspend        = lpfc_pci_suspend_one,
10123         .resume         = lpfc_pci_resume_one,
10124         .err_handler    = &lpfc_err_handler,
10125 };
10126
10127 /**
10128  * lpfc_init - lpfc module initialization routine
10129  *
10130  * This routine is to be invoked when the lpfc module is loaded into the
10131  * kernel. The special kernel macro module_init() is used to indicate the
10132  * role of this routine to the kernel as lpfc module entry point.
10133  *
10134  * Return codes
10135  *   0 - successful
10136  *   -ENOMEM - FC attach transport failed
10137  *   all others - failed
10138  */
10139 static int __init
10140 lpfc_init(void)
10141 {
10142         int error = 0;
10143
10144         printk(LPFC_MODULE_DESC "\n");
10145         printk(LPFC_COPYRIGHT "\n");
10146
10147         if (lpfc_enable_npiv) {
10148                 lpfc_transport_functions.vport_create = lpfc_vport_create;
10149                 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
10150         }
10151         lpfc_transport_template =
10152                                 fc_attach_transport(&lpfc_transport_functions);
10153         if (lpfc_transport_template == NULL)
10154                 return -ENOMEM;
10155         if (lpfc_enable_npiv) {
10156                 lpfc_vport_transport_template =
10157                         fc_attach_transport(&lpfc_vport_transport_functions);
10158                 if (lpfc_vport_transport_template == NULL) {
10159                         fc_release_transport(lpfc_transport_template);
10160                         return -ENOMEM;
10161                 }
10162         }
10163         error = pci_register_driver(&lpfc_driver);
10164         if (error) {
10165                 fc_release_transport(lpfc_transport_template);
10166                 if (lpfc_enable_npiv)
10167                         fc_release_transport(lpfc_vport_transport_template);
10168         }
10169
10170         return error;
10171 }
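
/*
 * Example usage (assuming a standard module install): load the driver with
 * NPIV enabled so the vport create/delete transport callbacks registered
 * above are available:
 *
 *	# modprobe lpfc lpfc_enable_npiv=1
 */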
10172
10173 /**
10174  * lpfc_exit - lpfc module removal routine
10175  *
10176  * This routine is invoked when the lpfc module is removed from the kernel.
10177  * The special kernel macro module_exit() is used to indicate the role of
10178  * this routine to the kernel as lpfc module exit point.
10179  */
10180 static void __exit
10181 lpfc_exit(void)
10182 {
10183         pci_unregister_driver(&lpfc_driver);
10184         fc_release_transport(lpfc_transport_template);
10185         if (lpfc_enable_npiv)
10186                 fc_release_transport(lpfc_vport_transport_template);
10187         if (_dump_buf_data) {
10188                 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
10189                                 "_dump_buf_data at 0x%p\n",
10190                                 (1L << _dump_buf_data_order), _dump_buf_data);
10191                 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
10192         }
10193
10194         if (_dump_buf_dif) {
10195                 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
10196                                 "_dump_buf_dif at 0x%p\n",
10197                                 (1L << _dump_buf_dif_order), _dump_buf_dif);
10198                 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
10199         }
10200 }
10201
10202 module_init(lpfc_init);
10203 module_exit(lpfc_exit);
10204 MODULE_LICENSE("GPL");
10205 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
10206 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
10207 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);