/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

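/*
 * snic_wq_cmpl_frame_send : WQ buf completion callback. Logs the ack
 * received for the posted snic_host_req and drops the buf's os_buf
 * reference.
 */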
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
                        struct cq_desc *cq_desc,
                        struct vnic_wq_buf *buf,
                        void *opaque)
{
        struct snic *snic = svnic_dev_priv(wq->vdev);

        SNIC_BUG_ON(buf->os_buf == NULL);

        if (snic_log_level & SNIC_DESC_LOGGING)
                SNIC_HOST_INFO(snic->shost,
                               "Ack received for snic_host_req %p.\n",
                               buf->os_buf);

        SNIC_TRC(snic->shost->host_no, 0, 0,
                 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
                 0);

        buf->os_buf = NULL;
}

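/*
 * snic_wq_cmpl_handler_cont : CQ service continuation. Completes sent
 * frames on the work queue under wq_lock.
 */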
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                          struct cq_desc *cq_desc,
                          u8 type,
                          u16 q_num,
                          u16 cmpl_idx,
                          void *opaque)
{
        struct snic *snic = svnic_dev_priv(vdev);
        unsigned long flags;

        SNIC_BUG_ON(q_num != 0);

        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        svnic_wq_service(&snic->wq[q_num],
                         cq_desc,
                         cmpl_idx,
                         snic_wq_cmpl_frame_send,
                         NULL);
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

        return 0;
} /* end of snic_wq_cmpl_handler_cont */

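/*
 * snic_wq_cmpl_handler : Services every WQ completion queue, handling at
 * most work_to_do entries per CQ. Returns the number of entries processed.
 */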
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
        unsigned int work_done = 0;
        unsigned int i;

        snic->s_stats.misc.last_ack_time = jiffies;
        for (i = 0; i < snic->wq_count; i++) {
                work_done += svnic_cq_service(&snic->cq[i],
                                              work_to_do,
                                              snic_wq_cmpl_handler_cont,
                                              NULL);
        }

        return work_done;
} /* end of snic_wq_cmpl_handler */

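/*
 * snic_free_wq_buf : Unmaps the WQ buf's DMA mapping; if the request is
 * still linked on spl_cmd_list, unlinks it and frees the request info.
 */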
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct snic_host_req *req = buf->os_buf;
        struct snic *snic = svnic_dev_priv(wq->vdev);
        struct snic_req_info *rqi = NULL;
        unsigned long flags;

        pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

        rqi = req_to_rqi(req);
        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                goto end;
        }

        SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

        if (rqi->sge_va) {
                snic_pci_unmap_rsp_buf(snic, rqi);
                kfree((void *)rqi->sge_va);
                rqi->sge_va = 0;
        }
        snic_req_free(snic, rqi);
        SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
        return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
        /* No multi queue support for now */
        BUILD_BUG_ON(SNIC_WQ_MAX > 1);

        return 0;
}

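/*
 * snic_wqdesc_avail : Returns the number of WQ descriptors available for
 * a request of the given type. One descriptor is kept in reserve so that
 * an HBA reset can always be issued.
 */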
static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
        int nr_wqdesc = snic->config.wq_enet_desc_count;

        if (q_num > 0) {
                /*
                 * Multi Queue case, additional care is required.
                 * Per WQ active requests need to be maintained.
                 */
                SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
                SNIC_BUG_ON(q_num > 0);

                return -1;
        }

        nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

        return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}

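/*
 * snic_queue_wq_desc : DMA-maps the request buffer and posts it on the
 * selected work queue. Returns -ENOMEM if the mapping fails or no
 * descriptor is available; otherwise updates the active request stats.
 */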
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
        dma_addr_t pa = 0;
        unsigned long flags;
        struct snic_fw_stats *fwstats = &snic->s_stats.fw;
        struct snic_host_req *req = (struct snic_host_req *) os_buf;
        long act_reqs;
        long desc_avail = 0;
        int q_num = 0;

        snic_print_desc(__func__, os_buf, len);

        /* Map request buffer */
        pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(snic->pdev, pa)) {
                SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

                return -ENOMEM;
        }

        req->req_pa = (ulong)pa;

        q_num = snic_select_wq(snic);

        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
        if (desc_avail <= 0) {
                pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
                req->req_pa = 0;
                spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
                atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
                SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

                return -ENOMEM;
        }

        snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
        /*
         * Update stats
         * note: when multi queue enabled, fw actv_reqs should be per queue.
         */
        act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

        if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
                atomic64_set(&fwstats->max_actv_reqs, act_reqs);

        return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
        unsigned long flags;

        INIT_LIST_HEAD(&rqi->list);

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_add_tail(&rqi->list, &snic->spl_cmd_list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
        u8 typ;
        struct snic_req_info *rqi = NULL;

        typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
                SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

        rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
        if (!rqi) {
                atomic64_inc(&snic->s_stats.io.alloc_fail);
                SNIC_HOST_ERR(snic->shost,
                              "Failed to allocate memory from snic req pool id = %d\n",
                              typ);
                return rqi;
        }

        memset(rqi, 0, sizeof(*rqi));
        rqi->rq_pool_type = typ;
        rqi->start_time = jiffies;
        rqi->req = (struct snic_host_req *) (rqi + 1);
        rqi->req_len = sizeof(struct snic_host_req);
        rqi->snic = snic;

        if (sg_cnt == 0)
                goto end;

        rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

        if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
                atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

        SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
        atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
        memset(rqi->req, 0, rqi->req_len);

        /* pre initialization of init_ctx to support req_to_rqi */
        rqi->req->hdr.init_ctx = (ulong) rqi;

        SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

        return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
        struct snic_host_req *req = NULL;

        SNIC_BUG_ON(!rqi);

        /* If abort to be issued second time, then reuse */
        if (rqi->abort_req)
                return rqi->abort_req;

        req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
        if (!req) {
                SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
                WARN_ON_ONCE(1);

                return NULL;
        }

        rqi->abort_req = req;
        memset(req, 0, sizeof(struct snic_host_req));
        /* pre initialization of init_ctx to support req_to_rqi */
        req->hdr.init_ctx = (ulong) rqi;

        return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
        struct snic_host_req *req = NULL;

        SNIC_BUG_ON(!rqi);

        req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
        if (!req) {
                SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
                WARN_ON_ONCE(1);

                return NULL;
        }

        SNIC_BUG_ON(rqi->dr_req != NULL);
        rqi->dr_req = req;
        memset(req, 0, sizeof(struct snic_host_req));
        /* pre initialization of init_ctx to support req_to_rqi */
        req->hdr.init_ctx = (ulong) rqi;

        return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
        SNIC_BUG_ON(rqi->req == rqi->abort_req);
        SNIC_BUG_ON(rqi->req == rqi->dr_req);
        SNIC_BUG_ON(rqi->sge_va != 0);

        SNIC_SCSI_DBG(snic->shost,
                      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
                      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

        if (rqi->abort_req) {
                if (rqi->abort_req->req_pa)
                        pci_unmap_single(snic->pdev,
                                         rqi->abort_req->req_pa,
                                         sizeof(struct snic_host_req),
                                         PCI_DMA_TODEVICE);

                mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->dr_req) {
                if (rqi->dr_req->req_pa)
                        pci_unmap_single(snic->pdev,
                                         rqi->dr_req->req_pa,
                                         sizeof(struct snic_host_req),
                                         PCI_DMA_TODEVICE);

                mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->req->req_pa)
                pci_unmap_single(snic->pdev,
                                 rqi->req->req_pa,
                                 rqi->req_len,
                                 PCI_DMA_TODEVICE);

        mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

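/*
 * snic_pci_unmap_rsp_buf : Unmaps the DMA region described by the
 * request's first SG descriptor (the response buffer).
 */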
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
        struct snic_sg_desc *sgd;

        sgd = req_to_sgl(rqi_to_req(rqi));
        SNIC_BUG_ON(sgd[0].addr == 0);
        pci_unmap_single(snic->pdev,
                         le64_to_cpu(sgd[0].addr),
                         le32_to_cpu(sgd[0].len),
                         PCI_DMA_FROMDEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
        struct snic_req_info *rqi;
        struct list_head *cur, *nxt;
        unsigned long flags;

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
                rqi = list_entry(cur, struct snic_req_info, list);
                list_del_init(&rqi->list);
                if (rqi->sge_va) {
                        snic_pci_unmap_rsp_buf(snic, rqi);
                        kfree((void *)rqi->sge_va);
                        rqi->sge_va = 0;
                }

                snic_req_free(snic, rqi);
        }
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
        unsigned long flags;

        spin_lock_irqsave(&snic->snic_lock, flags);
        if (snic->in_remove) {
                spin_unlock_irqrestore(&snic->snic_lock, flags);
                goto end;
        }
        spin_unlock_irqrestore(&snic->snic_lock, flags);

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                goto end;
        }
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
        snic_req_free(snic, rqi);

end:
        return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
        SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
        print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define LINE_BUFSZ      128     /* for snic_print_desc fn */
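/*
 * snic_dump_desc : Decodes a host request or fw response descriptor into a
 * readable form and logs its command id, sg count, status and context.
 */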
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
        struct snic_host_req *req = (struct snic_host_req *) os_buf;
        struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
        struct snic_req_info *rqi = NULL;
        char line[LINE_BUFSZ] = { '\0' };
        char *cmd_str = NULL;

        if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
                rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
        else
                rqi = (struct snic_req_info *) req->hdr.init_ctx;

        SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
        switch (req->hdr.type) {
        case SNIC_REQ_REPORT_TGTS:
                cmd_str = "report-tgt : ";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
                break;

        case SNIC_REQ_ICMND:
                cmd_str = "icmnd : ";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
                         req->u.icmnd.cdb[0]);
                break;

        case SNIC_REQ_ITMF:
                cmd_str = "itmf : ";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
                break;

        case SNIC_REQ_HBA_RESET:
                cmd_str = "hba reset :";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
                break;

        case SNIC_REQ_EXCH_VER:
                cmd_str = "exch ver : ";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
                break;

        case SNIC_REQ_TGT_INFO:
                cmd_str = "tgt info : ";
                break;

        case SNIC_RSP_REPORT_TGTS_CMPL:
                cmd_str = "report tgt cmpl : ";
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
                break;

        case SNIC_RSP_ICMND_CMPL:
                cmd_str = "icmnd_cmpl : ";
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
                         rqi->req->u.icmnd.cdb[0]);
                break;

        case SNIC_RSP_ITMF_CMPL:
                cmd_str = "itmf_cmpl : ";
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
                break;

        case SNIC_RSP_HBA_RESET_CMPL:
                cmd_str = "hba_reset_cmpl : ";
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
                break;

        case SNIC_RSP_EXCH_VER_CMPL:
                cmd_str = "exch_ver_cmpl : ";
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
                break;

        case SNIC_MSG_ACK:
                cmd_str = "msg ack : ";
                snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
                break;

        case SNIC_MSG_ASYNC_EVNOTIFY:
                cmd_str = "async notify : ";
                snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
                break;

        default:
                cmd_str = "unknown : ";
                SNIC_BUG_ON(1);
                break;
        }

        SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
                  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
                  req->hdr.init_ctx);

        /* To dump the raw byte stream, enable this log level */
        if (snic_log_level & 0x20)
                snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

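/*
 * snic_print_desc : Dumps the descriptor when descriptor logging is
 * enabled in snic_log_level.
 */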
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
        if (snic_log_level & SNIC_DESC_LOGGING)
                snic_dump_desc(fn, os_buf, len);
}

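/*
 * snic_calc_io_process_time : Tracks the maximum IO processing time, in
 * jiffies, measured from the request's start_time.
 */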
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
        u64 duration;

        duration = jiffies - rqi->start_time;

        if (duration > atomic64_read(&snic->s_stats.io.max_time))
                atomic64_set(&snic->s_stats.io.max_time, duration);
}