qed: Remove OOM messages
drivers/net/ethernet/qlogic/qed/qed_spq.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
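/* Completion callback used for BLOCK/EBLOCK ramrods: marks the shared
 * qed_spq_comp_done cookie as done and records the FW return code for
 * the thread polling in qed_spq_block().
 */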
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done                 = 0x1;
        comp_done->fw_return_code       = fw_return_code;

        /* make the update visible to the waiting thread */
        smp_wmb();
}

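/* Poll for a ramrod completion on behalf of a blocking caller. Sleeps in
 * 5-10ms slices for up to SPQ_BLOCK_SLEEP_LENGTH iterations; if the ramrod
 * is still pending, requests an MCP drain and polls once more before
 * giving up with -EBUSY.
 */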
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* validate we receive the completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive the completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        /* One last check, since the loop above exits without re-checking
         * after its final sleep.
         */
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
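/* Prepare an SPQ entry for posting: select the completion callback based
 * on the requested completion mode and dump the ramrod header. BLOCK and
 * EBLOCK modes share the blocking callback; CB mode keeps the callback
 * supplied by the caller.
 */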
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
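/* Program the xstorm context of the SPQ connection: enable the DQ credit
 * flow, set the loopback-TC physical queue and write the SPQ and
 * consolidation queue base addresses.
 */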
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16                             pq;
        struct qed_cxt_info             cxt_info;
        struct core_conn_context        *p_cxt;
        union qed_qm_pq_params          pq_params;
        int                             rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

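/* Copy an SPQ entry into the next free ring element and ring the XCM
 * doorbell with the new producer value. The 'echo' field carries the
 * producer index so the completion can later be matched back to this
 * entry.
 */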
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element        *elem;
        struct core_db_data             db;

        p_ent->elem.hdr.echo    = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell is rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

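/* Slowpath status-block callback for the event queue: walks the EQ from
 * the driver consumer up to a snapshot of the FW consumer, dispatching
 * each EQE either to the async handler or to qed_spq_completion(), and
 * finally updates the FW-visible producer.
 */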
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a usable
         * element (skipping the unusable elements at the end of a page), so
         * that our chain macros behave correctly.
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete the current segment of EQ entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return NULL;

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain))
                goto eq_allocate_fail;

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
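/* (Re)initialize the SPQ to a pristine state: reset the bookkeeping lists
 * and statistics, link every pre-allocated entry (and the DMA address of
 * its ramrod data) into the free pool, acquire the SPQ CID and program
 * the HW context.
 */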
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt  = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

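/* Allocate the SPQ container, its single-page ring and a coherent array
 * of SPQ entries sized to the ring capacity; undone by qed_spq_free().
 */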
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain))
                goto spq_allocate_fail;

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

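/* Hand out an SPQ entry: taken from the free pool when possible, else
 * allocated atomically and destined for the unlimited_pending list.
 */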
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
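/* Post entries from the given list to the HW ring, stopping once no more
 * than keep_reserve ring elements remain free so that high-priority
 * ramrods can still be posted later.
 */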
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

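/* Move entries from unlimited_pending into the pending list while free
 * pool entries are available, then post the pending list to HW, keeping
 * the default high-priority reserve.
 */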
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

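/* Post a single ramrod. A typical caller acquires an entry via
 * qed_spq_get_entry(), fills in the ramrod data and then calls
 * qed_spq_post(); in EBLOCK mode the call only returns once the
 * completion arrives (or the MCP drain fallback gives up).
 */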
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

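/* Match an EQE against the completion_pending list by its echo value.
 * Out-of-order completions are recorded in a bitmap and ring elements are
 * only returned for the longest run of consecutive completions; the
 * entry's callback is then invoked outside the lock, and any pending
 * ramrods are posted.
 */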
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq          *p_spq;
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
        int                     rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          le16_to_cpu(echo));
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   le16_to_cpu(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

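/* Allocate the ConsQ struct and its PBL chain of 0x80-byte elements; the
 * chain's base address is programmed into the SPQ context by
 * qed_spq_hw_initialize(). Paired with qed_consq_free().
 */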
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return NULL;

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain))
                goto consq_allocate_fail;

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}