drivers/net/ethernet/qlogic/qed/qed_spq.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

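/* Keep one SPQ ring element in reserve for high-priority ramrods; a
 * blocking post polls the completion flag up to SPQ_BLOCK_SLEEP_LENGTH
 * times, sleeping 5-10ms between polls.
 */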
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
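/* Completion callback installed for BLOCK/EBLOCK ramrods: it records the
 * firmware return code and raises the 'done' flag that qed_spq_block()
 * polls on.
 */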
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done                 = 0x1;
        comp_done->fw_return_code       = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}

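/* Busy-wait until a blocking ramrod completes. Polls the completion flag
 * for up to SPQ_BLOCK_SLEEP_LENGTH iterations (5-10ms each, i.e. roughly
 * 5-10 seconds in total); if the ramrod is still outstanding, requests an
 * MCP drain of the slowpath queue and polls one more round before giving
 * up with -EBUSY.
 */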
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
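/* Prepare an entry for posting: blocking modes (BLOCK/EBLOCK) get the
 * internal qed_spq_blocking_cb() wired in as their completion callback,
 * while CB mode keeps the callback supplied by the caller.
 */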
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
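/* Program the SPQ connection context: enable the XSTORM aggregation flags
 * the firmware expects, bind the connection to a loopback-TC physical
 * queue, and write the SPQ chain and consolidation queue base addresses.
 */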
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16                             pq;
        struct qed_cxt_info             cxt_info;
        struct core_conn_context        *p_cxt;
        union qed_qm_pq_params          pq_params;
        int                             rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

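/* Copy a single slowpath element onto the SPQ chain and ring the XCM
 * doorbell so firmware consumes it. The 'echo' field carries the producer
 * index at posting time and is echoed back in the EQ completion, which is
 * how qed_spq_completion() matches a completion to its entry.
 */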
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element        *elem;
        struct core_db_data             db;

        p_ent->elem.hdr.echo    = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell has been rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
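/* Dispatch EQ entries that are flagged asynchronous, i.e. not matched to a
 * ramrod this PF posted. Today only PROTOCOLID_COMMON events are expected,
 * and they are forwarded to the SR-IOV event handler.
 */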
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

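/* EQ slowpath handler, registered on the slowpath status block: snapshot
 * the firmware's consumer index, then walk the chain up to it, routing
 * each entry either to the async handler or to qed_spq_completion(), and
 * finally publish the new producer value back to firmware.
 */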
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (skipping the per-page unusable elements), so the chain
         * macros behave correctly.
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
        struct qed_hwfn *p_hwfn,
        struct eth_slow_path_rx_cqe *cqe,
        enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
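/* Reset the SPQ to a post-allocation state: rebuild the free pool over the
 * coherent entry buffer (pointing each ring element at its ramrod data),
 * clear the statistics and the out-of-order completion bitmap, acquire the
 * core CID and program the connection context.
 */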
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt  = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);

        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

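/* Acquire an SPQ entry: take one from the pre-allocated free pool when
 * available; otherwise allocate a fresh entry, which is routed through the
 * unlimited_pending queue and kfree'd rather than returned to the pool.
 */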
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry,
                                                 list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent itself.
                         */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

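/* Drain unlimited_pending into the pending list for as long as free pool
 * entries are available, then post the pending list to the ring while
 * keeping SPQ_HIGH_PRI_RESERVE_DEFAULT elements in reserve for
 * high-priority ramrods. Must be called with the SPQ lock held.
 */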
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

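/* Post a single ramrod: queue the entry, flush the pending list to
 * hardware and, for EBLOCK entries, block until the EQ completion arrives
 * (or until qed_spq_block() gives up).
 *
 * A minimal caller sketch (assuming the entry was obtained via
 * qed_spq_get_entry() and its ramrod header/data were filled in; most
 * callers in this driver go through the qed_sp_* wrappers instead):
 *
 *      p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *      rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
 */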
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

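/* EQ-side completion of a posted ramrod: match the echoed producer index
 * against the completion_pending list, mark the slot in the completion
 * bitmap so out-of-order completions only advance the ring consumer over
 * contiguous completed slots, invoke the entry's callback and then try to
 * post any ramrods that were waiting for room.
 */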
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq          *p_spq;
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
        int                     rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}