Merge tag 'trace-v4.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / core.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/core.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/device.h>
40 #include <linux/export.h>
41 #include <linux/err.h>
42 #include <linux/if_link.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/u64_stats_sync.h>
46 #include <linux/netdevice.h>
47 #include <linux/completion.h>
48 #include <linux/skbuff.h>
49 #include <linux/etherdevice.h>
50 #include <linux/types.h>
51 #include <linux/string.h>
52 #include <linux/gfp.h>
53 #include <linux/random.h>
54 #include <linux/jiffies.h>
55 #include <linux/mutex.h>
56 #include <linux/rcupdate.h>
57 #include <linux/slab.h>
58 #include <linux/workqueue.h>
59 #include <asm/byteorder.h>
60 #include <net/devlink.h>
61
62 #include "core.h"
63 #include "item.h"
64 #include "cmd.h"
65 #include "port.h"
66 #include "trap.h"
67 #include "emad.h"
68 #include "reg.h"
69
/* All registered mlxsw drivers; additions/removals and lookups are
 * serialized by mlxsw_core_driver_list_lock.
 */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* Root of this module's debugfs hierarchy; per-device dirs live below it. */
static struct dentry *mlxsw_core_dbg_root;

/* Module-global workqueue (presumably backing mlxsw_core_schedule_dw() —
 * creation/use is outside this chunk, confirm).
 */
static struct workqueue_struct *mlxsw_wq;
78
/* Per-CPU RX statistics, indexed by trap ID and by local port.
 * The u64 packet/byte counters are read under the syncp seqcount so
 * 32-bit readers see consistent pairs; the u32 counters are read
 * directly (see mlxsw_core_rx_stats_dbg_read()).
 */
struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;	/* guards the u64 counters above */
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;	/* NOTE(review): presumably out-of-range trap ID drops — confirm at increment site */
	u32			port_rx_invalid;	/* NOTE(review): presumably out-of-range port drops — confirm at increment site */
};
90
/* Core per-device instance, one per bus device.
 * driver_priv[] is a variable-size tail handed to the device driver via
 * mlxsw_core_driver_priv() and must remain the last member.
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;			/* EMAD transaction ID counter; upper 32 bits randomized at init */
		struct list_head trans_list;	/* in-flight EMAD transactions (RCU-read, spinlock-write) */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;			/* true once EMAD traps are set up */
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;		/* per-device debugfs directory */
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
117
/* Return the driver-private area allocated immediately after
 * struct mlxsw_core (the driver_priv[] tail).
 */
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
123
/* Node on mlxsw_core->rx_listener_list pairing an RX listener with the
 * private pointer passed to its callback.
 */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};
129
/* Node on mlxsw_core->event_listener_list pairing an event listener
 * with the private pointer passed to its callback.
 */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
135
136 /******************
137  * EMAD processing
138  ******************/
139
140 /* emad_eth_hdr_dmac
141  * Destination MAC in EMAD's Ethernet header.
142  * Must be set to 01:02:c9:00:00:01
143  */
144 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
145
146 /* emad_eth_hdr_smac
147  * Source MAC in EMAD's Ethernet header.
148  * Must be set to 00:02:c9:01:02:03
149  */
150 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
151
152 /* emad_eth_hdr_ethertype
153  * Ethertype in EMAD's Ethernet header.
154  * Must be set to 0x8932
155  */
156 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
157
158 /* emad_eth_hdr_mlx_proto
159  * Mellanox protocol.
160  * Must be set to 0x0.
161  */
162 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
163
164 /* emad_eth_hdr_ver
165  * Mellanox protocol version.
166  * Must be set to 0x0.
167  */
168 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
169
170 /* emad_op_tlv_type
171  * Type of the TLV.
172  * Must be set to 0x1 (operation TLV).
173  */
174 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
175
176 /* emad_op_tlv_len
177  * Length of the operation TLV in u32.
178  * Must be set to 0x4.
179  */
180 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
181
182 /* emad_op_tlv_dr
183  * Direct route bit. Setting to 1 indicates the EMAD is a direct route
184  * EMAD. DR TLV must follow.
185  *
186  * Note: Currently not supported and must not be set.
187  */
188 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
189
190 /* emad_op_tlv_status
191  * Returned status in case of EMAD response. Must be set to 0 in case
192  * of EMAD request.
193  * 0x0 - success
194  * 0x1 - device is busy. Requester should retry
195  * 0x2 - Mellanox protocol version not supported
196  * 0x3 - unknown TLV
197  * 0x4 - register not supported
198  * 0x5 - operation class not supported
199  * 0x6 - EMAD method not supported
200  * 0x7 - bad parameter (e.g. port out of range)
201  * 0x8 - resource not available
202  * 0x9 - message receipt acknowledgment. Requester should retry
203  * 0x70 - internal error
204  */
205 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
206
207 /* emad_op_tlv_register_id
208  * Register ID of register within register TLV.
209  */
210 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
211
212 /* emad_op_tlv_r
213  * Response bit. Setting to 1 indicates Response, otherwise request.
214  */
215 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
216
217 /* emad_op_tlv_method
218  * EMAD method type.
219  * 0x1 - query
220  * 0x2 - write
221  * 0x3 - send (currently not supported)
222  * 0x4 - event
223  */
224 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
225
226 /* emad_op_tlv_class
227  * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
228  */
229 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
230
231 /* emad_op_tlv_tid
232  * EMAD transaction ID. Used for pairing request and response EMADs.
233  */
234 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
235
236 /* emad_reg_tlv_type
237  * Type of the TLV.
238  * Must be set to 0x3 (register TLV).
239  */
240 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
241
/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
246
247 /* emad_end_tlv_type
248  * Type of the TLV.
249  * Must be set to 0x0 (end TLV).
250  */
251 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
252
253 /* emad_end_tlv_len
254  * Length of the end TLV in u32.
255  * Must be set to 1.
256  */
257 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
258
/* Kind of register access being performed (maps to the EMAD operation
 * TLV method: query or write).
 */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
263
264 static inline const char *
265 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
266 {
267         switch (type) {
268         case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
269                 return "query";
270         case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
271                 return "write";
272         }
273         BUG();
274 }
275
/* Fill the end TLV that terminates every EMAD: fixed type and length. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
281
/* Fill a register TLV: type, length in u32 units (register payload
 * plus the one-word TLV header), then copy the register payload in
 * right after the header word.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
290
291 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
292                                    const struct mlxsw_reg_info *reg,
293                                    enum mlxsw_core_reg_access_type type,
294                                    u64 tid)
295 {
296         mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
297         mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
298         mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
299         mlxsw_emad_op_tlv_status_set(op_tlv, 0);
300         mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
301         mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
302         if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
303                 mlxsw_emad_op_tlv_method_set(op_tlv,
304                                              MLXSW_EMAD_OP_TLV_METHOD_QUERY);
305         else
306                 mlxsw_emad_op_tlv_method_set(op_tlv,
307                                              MLXSW_EMAD_OP_TLV_METHOD_WRITE);
308         mlxsw_emad_op_tlv_class_set(op_tlv,
309                                     MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
310         mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
311 }
312
/* Prepend and fill the EMAD Ethernet header on @skb (well-known
 * DMAC/SMAC, EMAD ethertype, Mellanox protocol and version) and reset
 * the skb MAC header offset. Always returns 0.
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
327
/* Build a complete EMAD request in @skb. The skb was allocated fully
 * reserved (see mlxsw_emad_alloc()), so sections are pushed in reverse
 * wire order: end TLV, register TLV carrying @payload, operation TLV,
 * and finally the Ethernet header.
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
347
348 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
349 {
350         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
351 }
352
353 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
354 {
355         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
356                                       MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
357 }
358
359 static char *mlxsw_emad_reg_payload(const char *op_tlv)
360 {
361         return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
362 }
363
364 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
365 {
366         char *op_tlv;
367
368         op_tlv = mlxsw_emad_op_tlv(skb);
369         return mlxsw_emad_op_tlv_tid_get(op_tlv);
370 }
371
372 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
373 {
374         char *op_tlv;
375
376         op_tlv = mlxsw_emad_op_tlv(skb);
377         return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
378 }
379
380 static int mlxsw_emad_process_status(char *op_tlv,
381                                      enum mlxsw_emad_op_tlv_status *p_status)
382 {
383         *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
384
385         switch (*p_status) {
386         case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
387                 return 0;
388         case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
389         case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
390                 return -EAGAIN;
391         case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
392         case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
393         case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
394         case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
395         case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
396         case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
397         case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
398         case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
399         default:
400                 return -EIO;
401         }
402 }
403
/* Convenience wrapper: decode the EMAD status straight from an skb. */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
410
/* State of one in-flight EMAD register transaction. */
struct mlxsw_reg_trans {
	struct list_head list;		/* node on core->emad.trans_list */
	struct list_head bulk_list;	/* node on the caller's bulk list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* prepared request; copied per (re)send */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* response timeout handler */
	unsigned int retries;		/* retransmissions so far */
	u64 tid;			/* transaction ID matched against responses */
	struct completion completion;	/* signaled when transaction finishes */
	atomic_t active;		/* 1 while a send is outstanding; response
					 * and timeout race to dec-and-test it
					 */
	mlxsw_reg_trans_cb_t *cb;	/* optional response-payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final result reported to the waiter */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
430
431 #define MLXSW_EMAD_TIMEOUT_MS 200
432
433 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
434 {
435         unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
436
437         mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
438 }
439
440 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
441                                struct mlxsw_reg_trans *trans)
442 {
443         struct sk_buff *skb;
444         int err;
445
446         skb = skb_copy(trans->tx_skb, GFP_KERNEL);
447         if (!skb)
448                 return -ENOMEM;
449
450         atomic_set(&trans->active, 1);
451         err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
452         if (err) {
453                 dev_kfree_skb(skb);
454                 return err;
455         }
456         mlxsw_emad_trans_timeout_schedule(trans);
457         return 0;
458 }
459
/* Complete a transaction: free the kept request skb, unlink the
 * transaction from the pending list (RCU-safe under the list lock),
 * record the final @err and wake the waiter. trans->err must be set
 * before complete() so the waiter reads a valid result.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
471
472 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
473                                       struct mlxsw_reg_trans *trans)
474 {
475         int err;
476
477         if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
478                 trans->retries++;
479                 err = mlxsw_emad_transmit(trans->core, trans);
480                 if (err == 0)
481                         return;
482         } else {
483                 err = -EIO;
484         }
485         mlxsw_emad_trans_finish(trans, err);
486 }
487
/* Delayed-work handler fired when no response arrived in time.
 * atomic_dec_and_test() on trans->active arbitrates against
 * mlxsw_emad_process_response(): only the first of the two to
 * decrement proceeds, so a transaction is handled exactly once.
 */
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
499
500 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
501                                         struct mlxsw_reg_trans *trans,
502                                         struct sk_buff *skb)
503 {
504         int err;
505
506         if (!atomic_dec_and_test(&trans->active))
507                 return;
508
509         err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
510         if (err == -EAGAIN) {
511                 mlxsw_emad_transmit_retry(mlxsw_core, trans);
512         } else {
513                 if (err == 0) {
514                         char *op_tlv = mlxsw_emad_op_tlv(skb);
515
516                         if (trans->cb)
517                                 trans->cb(mlxsw_core,
518                                           mlxsw_emad_reg_payload(op_tlv),
519                                           trans->reg->len, trans->cb_priv);
520                 }
521                 mlxsw_emad_trans_finish(trans, err);
522         }
523 }
524
/* RX trap handler for EMAD frames; called with rcu read lock held,
 * which protects the RCU walk of the pending-transaction list.
 * Non-response EMADs and responses whose TID matches no pending
 * transaction are silently dropped. Always consumes @skb.
 */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
545
/* Listener registered for the ETHEMAD trap from any local port. */
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};
551
/* Configure the device so EMAD responses reach the CPU: set up the
 * EMAD trap group (HTGT register) and then point the ETHEMAD trap at
 * TRAP_TO_CPU (HPKT register). The group must exist before the trap
 * action references it.
 */
static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
567
/* Bring up EMAD support: seed the transaction ID, initialize the
 * pending-transaction list, register the response RX listener and set
 * the device traps; only then flip use_emad so register access goes
 * over EMAD. The listener must be registered before the traps are
 * enabled, and is unregistered on trap-setup failure.
 */
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}
604
/* Tear down EMAD support in reverse of mlxsw_emad_init(): stop using
 * EMAD for register access first, switch the ETHEMAD trap to discard
 * (the write going through the non-EMAD path), then drop the listener.
 */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}
618
/* Allocate a zeroed skb sized for an EMAD carrying @reg_len payload
 * bytes plus the Ethernet header, operation/register/end TLVs and the
 * bus TX header. The data area is fully reserved so callers build the
 * frame back-to-front with skb_push(). Returns NULL if the frame would
 * exceed MLXSW_EMAD_MAX_FRAME_LEN or the allocation fails.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
639
/* Build and send an asynchronous EMAD register access.
 * Initializes @trans (caller-provided), links it on @bulk_list and on
 * the core's pending list, constructs the request frame and transmits
 * the first attempt. The caller later waits on trans->completion;
 * @cb, if non-NULL, runs with the response payload on success.
 * On failure the transaction is fully unlinked and the skb freed, so
 * nothing is left queued.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	/* Publish the transaction before transmitting so the RX path can
	 * match the response; writers take the lock, readers use RCU.
	 */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
692
693 /*****************
694  * Core functions
695  *****************/
696
697 static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
698 {
699         struct mlxsw_core *mlxsw_core = file->private;
700         struct mlxsw_core_pcpu_stats *p;
701         u64 rx_packets, rx_bytes;
702         u64 tmp_rx_packets, tmp_rx_bytes;
703         u32 rx_dropped, rx_invalid;
704         unsigned int start;
705         int i;
706         int j;
707         static const char hdr[] =
708                 "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
709
710         seq_printf(file, hdr);
711         for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
712                 rx_packets = 0;
713                 rx_bytes = 0;
714                 rx_dropped = 0;
715                 for_each_possible_cpu(j) {
716                         p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
717                         do {
718                                 start = u64_stats_fetch_begin(&p->syncp);
719                                 tmp_rx_packets = p->trap_rx_packets[i];
720                                 tmp_rx_bytes = p->trap_rx_bytes[i];
721                         } while (u64_stats_fetch_retry(&p->syncp, start));
722
723                         rx_packets += tmp_rx_packets;
724                         rx_bytes += tmp_rx_bytes;
725                         rx_dropped += p->trap_rx_dropped[i];
726                 }
727                 seq_printf(file, "trap %3d %12llu %12llu %10u\n",
728                            i, rx_packets, rx_bytes, rx_dropped);
729         }
730         rx_invalid = 0;
731         for_each_possible_cpu(j) {
732                 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
733                 rx_invalid += p->trap_rx_invalid;
734         }
735         seq_printf(file, "trap INV                           %10u\n",
736                    rx_invalid);
737
738         for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
739                 rx_packets = 0;
740                 rx_bytes = 0;
741                 rx_dropped = 0;
742                 for_each_possible_cpu(j) {
743                         p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
744                         do {
745                                 start = u64_stats_fetch_begin(&p->syncp);
746                                 tmp_rx_packets = p->port_rx_packets[i];
747                                 tmp_rx_bytes = p->port_rx_bytes[i];
748                         } while (u64_stats_fetch_retry(&p->syncp, start));
749
750                         rx_packets += tmp_rx_packets;
751                         rx_bytes += tmp_rx_bytes;
752                         rx_dropped += p->port_rx_dropped[i];
753                 }
754                 seq_printf(file, "port %3d %12llu %12llu %10u\n",
755                            i, rx_packets, rx_bytes, rx_dropped);
756         }
757         rx_invalid = 0;
758         for_each_possible_cpu(j) {
759                 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
760                 rx_invalid += p->port_rx_invalid;
761         }
762         seq_printf(file, "port INV                           %10u\n",
763                    rx_invalid);
764         return 0;
765 }
766
/* debugfs open: bind the rx_stats seq_file to this device's core
 * instance (stashed in inode->i_private at file creation time).
 */
static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}
773
/* File operations for the per-device debugfs "rx_stats" file. */
static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
781
/* Add @mlxsw_driver to the global driver list under the list lock.
 * Always returns 0.
 */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);
790
/* Remove @mlxsw_driver from the global driver list under the list lock. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
798
799 static struct mlxsw_driver *__driver_find(const char *kind)
800 {
801         struct mlxsw_driver *mlxsw_driver;
802
803         list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
804                 if (strcmp(mlxsw_driver->kind, kind) == 0)
805                         return mlxsw_driver;
806         }
807         return NULL;
808 }
809
810 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
811 {
812         struct mlxsw_driver *mlxsw_driver;
813
814         spin_lock(&mlxsw_core_driver_list_lock);
815         mlxsw_driver = __driver_find(kind);
816         if (!mlxsw_driver) {
817                 spin_unlock(&mlxsw_core_driver_list_lock);
818                 request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
819                 spin_lock(&mlxsw_core_driver_list_lock);
820                 mlxsw_driver = __driver_find(kind);
821         }
822         if (mlxsw_driver) {
823                 if (!try_module_get(mlxsw_driver->owner))
824                         mlxsw_driver = NULL;
825         }
826
827         spin_unlock(&mlxsw_core_driver_list_lock);
828         return mlxsw_driver;
829 }
830
831 static void mlxsw_core_driver_put(const char *kind)
832 {
833         struct mlxsw_driver *mlxsw_driver;
834
835         spin_lock(&mlxsw_core_driver_list_lock);
836         mlxsw_driver = __driver_find(kind);
837         spin_unlock(&mlxsw_core_driver_list_lock);
838         if (!mlxsw_driver)
839                 return;
840         module_put(mlxsw_driver->owner);
841 }
842
/* Create the per-device debugfs directory (named after the bus device)
 * containing the "rx_stats" file and read-only "vsd"/"psid" blobs from
 * the bus info. Returns -ENOMEM if the directory cannot be created.
 */
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}
863
/* Remove the per-device debugfs directory and all entries within it. */
static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
868
869 static int mlxsw_devlink_port_split(struct devlink *devlink,
870                                     unsigned int port_index,
871                                     unsigned int count)
872 {
873         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
874
875         if (port_index >= MLXSW_PORT_MAX_PORTS)
876                 return -EINVAL;
877         if (!mlxsw_core->driver->port_split)
878                 return -EOPNOTSUPP;
879         return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
880 }
881
882 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
883                                       unsigned int port_index)
884 {
885         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
886
887         if (port_index >= MLXSW_PORT_MAX_PORTS)
888                 return -EINVAL;
889         if (!mlxsw_core->driver->port_unsplit)
890                 return -EOPNOTSUPP;
891         return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
892 }
893
894 static int
895 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
896                           unsigned int sb_index, u16 pool_index,
897                           struct devlink_sb_pool_info *pool_info)
898 {
899         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
900         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
901
902         if (!mlxsw_driver->sb_pool_get)
903                 return -EOPNOTSUPP;
904         return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
905                                          pool_index, pool_info);
906 }
907
908 static int
909 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
910                           unsigned int sb_index, u16 pool_index, u32 size,
911                           enum devlink_sb_threshold_type threshold_type)
912 {
913         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
914         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
915
916         if (!mlxsw_driver->sb_pool_set)
917                 return -EOPNOTSUPP;
918         return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
919                                          pool_index, size, threshold_type);
920 }
921
/* Map a devlink_port back to the mlxsw_core_port embedding it. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
926
927 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
928                                           unsigned int sb_index, u16 pool_index,
929                                           u32 *p_threshold)
930 {
931         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
932         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
933         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
934
935         if (!mlxsw_driver->sb_port_pool_get)
936                 return -EOPNOTSUPP;
937         return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
938                                               pool_index, p_threshold);
939 }
940
941 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
942                                           unsigned int sb_index, u16 pool_index,
943                                           u32 threshold)
944 {
945         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
946         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
947         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
948
949         if (!mlxsw_driver->sb_port_pool_set)
950                 return -EOPNOTSUPP;
951         return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
952                                               pool_index, threshold);
953 }
954
955 static int
956 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
957                                   unsigned int sb_index, u16 tc_index,
958                                   enum devlink_sb_pool_type pool_type,
959                                   u16 *p_pool_index, u32 *p_threshold)
960 {
961         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
962         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
963         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
964
965         if (!mlxsw_driver->sb_tc_pool_bind_get)
966                 return -EOPNOTSUPP;
967         return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
968                                                  tc_index, pool_type,
969                                                  p_pool_index, p_threshold);
970 }
971
972 static int
973 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
974                                   unsigned int sb_index, u16 tc_index,
975                                   enum devlink_sb_pool_type pool_type,
976                                   u16 pool_index, u32 threshold)
977 {
978         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
979         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
980         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
981
982         if (!mlxsw_driver->sb_tc_pool_bind_set)
983                 return -EOPNOTSUPP;
984         return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
985                                                  tc_index, pool_type,
986                                                  pool_index, threshold);
987 }
988
989 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
990                                          unsigned int sb_index)
991 {
992         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
993         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
994
995         if (!mlxsw_driver->sb_occ_snapshot)
996                 return -EOPNOTSUPP;
997         return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
998 }
999
1000 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1001                                           unsigned int sb_index)
1002 {
1003         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1004         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1005
1006         if (!mlxsw_driver->sb_occ_max_clear)
1007                 return -EOPNOTSUPP;
1008         return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1009 }
1010
1011 static int
1012 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1013                                    unsigned int sb_index, u16 pool_index,
1014                                    u32 *p_cur, u32 *p_max)
1015 {
1016         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1017         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1018         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1019
1020         if (!mlxsw_driver->sb_occ_port_pool_get)
1021                 return -EOPNOTSUPP;
1022         return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1023                                                   pool_index, p_cur, p_max);
1024 }
1025
1026 static int
1027 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1028                                       unsigned int sb_index, u16 tc_index,
1029                                       enum devlink_sb_pool_type pool_type,
1030                                       u32 *p_cur, u32 *p_max)
1031 {
1032         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1033         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1034         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1035
1036         if (!mlxsw_driver->sb_occ_tc_port_bind_get)
1037                 return -EOPNOTSUPP;
1038         return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1039                                                      sb_index, tc_index,
1040                                                      pool_type, p_cur, p_max);
1041 }
1042
/* devlink operations table; each entry dispatches to the optional driver
 * hook of the same name, returning -EOPNOTSUPP when it is not implemented.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
1057
/* Instantiate an mlxsw core for a newly probed bus device.
 *
 * Pins the matching driver module, allocates the core (embedded in the
 * devlink private area together with the driver's private data), then
 * brings up bus, EMAD, hwmon, devlink, the driver itself and debugfs in
 * that order. On any failure, everything already set up is torn down in
 * reverse order and a negative errno is returned.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	/* Takes a module reference; dropped on all error paths below. */
	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* The LAG-to-local-port mapping table is only needed when the
	 * driver profile enables LAG.
	 */
	if (mlxsw_driver->profile->used_max_lag &&
	    mlxsw_driver->profile->used_max_port_per_lag) {
		alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
			     mlxsw_driver->profile->max_port_per_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

	/* Unwind in strict reverse order of the setup above. hwmon has no
	 * fini step here, so err_hwmon_init falls through to EMAD fini.
	 */
err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
	devlink_unregister(devlink);
err_devlink_register:
err_hwmon_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1151
/* Tear down a core instance, mirroring mlxsw_core_bus_device_register()
 * in exact reverse order, then drop the driver module reference.
 * device_kind is cached before devlink_free() releases the core memory.
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1168
1169 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1170                                   const struct mlxsw_tx_info *tx_info)
1171 {
1172         return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1173                                                   tx_info);
1174 }
1175 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1176
1177 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1178                             const struct mlxsw_tx_info *tx_info)
1179 {
1180         return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1181                                              tx_info);
1182 }
1183 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1184
1185 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1186                                    const struct mlxsw_rx_listener *rxl_b)
1187 {
1188         return (rxl_a->func == rxl_b->func &&
1189                 rxl_a->local_port == rxl_b->local_port &&
1190                 rxl_a->trap_id == rxl_b->trap_id);
1191 }
1192
1193 static struct mlxsw_rx_listener_item *
1194 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1195                         const struct mlxsw_rx_listener *rxl,
1196                         void *priv)
1197 {
1198         struct mlxsw_rx_listener_item *rxl_item;
1199
1200         list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1201                 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1202                     rxl_item->priv == priv)
1203                         return rxl_item;
1204         }
1205         return NULL;
1206 }
1207
/* Register an RX listener for a (func, local_port, trap_id) triple.
 * The listener struct is copied, so the caller's copy may go away.
 * Published with list_add_rcu() since the RX path walks the list under
 * rcu_read_lock(). Returns -EEXIST for a duplicate registration.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1227
/* Unregister a previously registered RX listener. A no-op when no
 * matching registration exists. synchronize_rcu() ensures no RX-path
 * reader still holds the item before it is freed.
 */
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1242
/* RX trampoline for event listeners: parse the EMAD op/reg TLVs out of
 * the skb, invoke the event callback with the register id/len and
 * payload, then consume the skb.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* TLV length is in u32 units and includes the TLV header word. */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
1258
1259 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1260                                       const struct mlxsw_event_listener *el_b)
1261 {
1262         return (el_a->func == el_b->func &&
1263                 el_a->trap_id == el_b->trap_id);
1264 }
1265
1266 static struct mlxsw_event_listener_item *
1267 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1268                            const struct mlxsw_event_listener *el,
1269                            void *priv)
1270 {
1271         struct mlxsw_event_listener_item *el_item;
1272
1273         list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1274                 if (__is_event_listener_equal(&el_item->el, el) &&
1275                     el_item->priv == priv)
1276                         return el_item;
1277         }
1278         return NULL;
1279 }
1280
/* Register an event listener for el->trap_id. Internally backed by an
 * RX listener (mlxsw_core_event_listener_func) that decodes the EMAD
 * event before calling el->func. Returns -EEXIST for duplicates.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1318
/* Unregister an event listener and its backing RX listener. The rxl key
 * is rebuilt exactly as in mlxsw_core_event_listener_register() so the
 * lookup matches. A no-op when no matching registration exists.
 */
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	/* The RX unregister performs the RCU grace period wait. */
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1338
/* Return the next EMAD transaction id (atomic, monotonically increasing). */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
1343
1344 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1345                                       const struct mlxsw_reg_info *reg,
1346                                       char *payload,
1347                                       enum mlxsw_core_reg_access_type type,
1348                                       struct list_head *bulk_list,
1349                                       mlxsw_reg_trans_cb_t *cb,
1350                                       unsigned long cb_priv)
1351 {
1352         u64 tid = mlxsw_core_tid_get(mlxsw_core);
1353         struct mlxsw_reg_trans *trans;
1354         int err;
1355
1356         trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1357         if (!trans)
1358                 return -ENOMEM;
1359
1360         err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1361                                     bulk_list, cb, cb_priv, tid);
1362         if (err) {
1363                 kfree(trans);
1364                 return err;
1365         }
1366         return 0;
1367 }
1368
/* Start an asynchronous EMAD register QUERY transaction; completion is
 * awaited later via mlxsw_reg_trans_bulk_wait() on bulk_list.
 */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);
1379
/* Start an asynchronous EMAD register WRITE transaction; completion is
 * awaited later via mlxsw_reg_trans_bulk_wait() on bulk_list.
 */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
1390
/* Wait for one EMAD transaction to complete, log retries and failures,
 * then release it. The timeout work is cancelled only after completion
 * so trans->err is stable when read. Frees trans via kfree_rcu since
 * the EMAD RX path may still hold an RCU reference.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
1415
1416 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1417 {
1418         struct mlxsw_reg_trans *trans;
1419         struct mlxsw_reg_trans *tmp;
1420         int sum_err = 0;
1421         int err;
1422
1423         list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1424                 err = mlxsw_reg_trans_wait(trans);
1425                 if (err && sum_err == 0)
1426                         sum_err = err; /* first error to be returned */
1427         }
1428         return sum_err;
1429 }
1430 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
1431
/* Access a device register through the command interface (mailboxes),
 * used before the EMAD/trap path is operational. Builds EMAD-format op
 * and reg TLVs in the in-mailbox, issues the ACCESS_REG command, and on
 * success copies the returned register payload back to the caller.
 * Retries up to MLXSW_EMAD_MAX_RETRY times on -EAGAIN status.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	/* The register TLV follows the op TLV (length in u32 units). */
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
1486
1487 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
1488                                      char *payload, size_t payload_len,
1489                                      unsigned long cb_priv)
1490 {
1491         char *orig_payload = (char *) cb_priv;
1492
1493         memcpy(orig_payload, payload, payload_len);
1494 }
1495
/* Synchronous register access: dispatch to the command interface before
 * EMAD is usable, otherwise issue a single EMAD transaction and block
 * until it completes, copying the result back into payload via the
 * completion callback.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	/* Single-entry bulk list; the wait also frees the transaction. */
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
1520
/* Synchronously read a device register into payload. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
1528
/* Synchronously write payload to a device register. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
1536
1537 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1538                             struct mlxsw_rx_info *rx_info)
1539 {
1540         struct mlxsw_rx_listener_item *rxl_item;
1541         const struct mlxsw_rx_listener *rxl;
1542         struct mlxsw_core_pcpu_stats *pcpu_stats;
1543         u8 local_port;
1544         bool found = false;
1545
1546         if (rx_info->is_lag) {
1547                 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
1548                                     __func__, rx_info->u.lag_id,
1549                                     rx_info->trap_id);
1550                 /* Upper layer does not care if the skb came from LAG or not,
1551                  * so just get the local_port for the lag port and push it up.
1552                  */
1553                 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
1554                                                         rx_info->u.lag_id,
1555                                                         rx_info->lag_port_index);
1556         } else {
1557                 local_port = rx_info->u.sys_port;
1558         }
1559
1560         dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
1561                             __func__, local_port, rx_info->trap_id);
1562
1563         if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
1564             (local_port >= MLXSW_PORT_MAX_PORTS))
1565                 goto drop;
1566
1567         rcu_read_lock();
1568         list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
1569                 rxl = &rxl_item->rxl;
1570                 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
1571                      rxl->local_port == local_port) &&
1572                     rxl->trap_id == rx_info->trap_id) {
1573                         found = true;
1574                         break;
1575                 }
1576         }
1577         rcu_read_unlock();
1578         if (!found)
1579                 goto drop;
1580
1581         pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
1582         u64_stats_update_begin(&pcpu_stats->syncp);
1583         pcpu_stats->port_rx_packets[local_port]++;
1584         pcpu_stats->port_rx_bytes[local_port] += skb->len;
1585         pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
1586         pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
1587         u64_stats_update_end(&pcpu_stats->syncp);
1588
1589         rxl->func(skb, local_port, rxl_item->priv);
1590         return;
1591
1592 drop:
1593         if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
1594                 this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
1595         else
1596                 this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
1597         if (local_port >= MLXSW_PORT_MAX_PORTS)
1598                 this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
1599         else
1600                 this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
1601         dev_kfree_skb(skb);
1602 }
1603 EXPORT_SYMBOL(mlxsw_core_skb_receive);
1604
1605 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1606                                         u16 lag_id, u8 port_index)
1607 {
1608         return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
1609                port_index;
1610 }
1611
1612 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1613                                 u16 lag_id, u8 port_index, u8 local_port)
1614 {
1615         int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1616                                                  lag_id, port_index);
1617
1618         mlxsw_core->lag.mapping[index] = local_port;
1619 }
1620 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1621
1622 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1623                               u16 lag_id, u8 port_index)
1624 {
1625         int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1626                                                  lag_id, port_index);
1627
1628         return mlxsw_core->lag.mapping[index];
1629 }
1630 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1631
1632 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1633                                   u16 lag_id, u8 local_port)
1634 {
1635         int i;
1636
1637         for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
1638                 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1639                                                          lag_id, i);
1640
1641                 if (mlxsw_core->lag.mapping[index] == local_port)
1642                         mlxsw_core->lag.mapping[index] = 0;
1643         }
1644 }
1645 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1646
1647 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
1648                          struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
1649                          struct net_device *dev, bool split, u32 split_group)
1650 {
1651         struct devlink *devlink = priv_to_devlink(mlxsw_core);
1652         struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1653
1654         if (split)
1655                 devlink_port_split_set(devlink_port, split_group);
1656         devlink_port_type_eth_set(devlink_port, dev);
1657         return devlink_port_register(devlink, devlink_port, local_port);
1658 }
1659 EXPORT_SYMBOL(mlxsw_core_port_init);
1660
1661 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
1662 {
1663         struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1664
1665         devlink_port_unregister(devlink_port);
1666 }
1667 EXPORT_SYMBOL(mlxsw_core_port_fini);
1668
1669 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
1670                                     const char *buf, size_t size)
1671 {
1672         __be32 *m = (__be32 *) buf;
1673         int i;
1674         int count = size / sizeof(__be32);
1675
1676         for (i = count - 1; i >= 0; i--)
1677                 if (m[i])
1678                         break;
1679         i++;
1680         count = i ? i : 1;
1681         for (i = 0; i < count; i += 4)
1682                 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
1683                         i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
1684                         be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
1685 }
1686
1687 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
1688                    u32 in_mod, bool out_mbox_direct,
1689                    char *in_mbox, size_t in_mbox_size,
1690                    char *out_mbox, size_t out_mbox_size)
1691 {
1692         u8 status;
1693         int err;
1694
1695         BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
1696         if (!mlxsw_core->bus->cmd_exec)
1697                 return -EOPNOTSUPP;
1698
1699         dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1700                 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
1701         if (in_mbox) {
1702                 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
1703                 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
1704         }
1705
1706         err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
1707                                         opcode_mod, in_mod, out_mbox_direct,
1708                                         in_mbox, in_mbox_size,
1709                                         out_mbox, out_mbox_size, &status);
1710
1711         if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
1712                 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
1713                         opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1714                         in_mod, status, mlxsw_cmd_status_str(status));
1715         } else if (err == -ETIMEDOUT) {
1716                 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1717                         opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1718                         in_mod);
1719         }
1720
1721         if (!err && out_mbox) {
1722                 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
1723                 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
1724         }
1725         return err;
1726 }
1727 EXPORT_SYMBOL(mlxsw_cmd_exec);
1728
1729 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
1730 {
1731         return queue_delayed_work(mlxsw_wq, dwork, delay);
1732 }
1733 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
1734
1735 static int __init mlxsw_core_module_init(void)
1736 {
1737         int err;
1738
1739         mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
1740         if (!mlxsw_wq)
1741                 return -ENOMEM;
1742         mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
1743         if (!mlxsw_core_dbg_root) {
1744                 err = -ENOMEM;
1745                 goto err_debugfs_create_dir;
1746         }
1747         return 0;
1748
1749 err_debugfs_create_dir:
1750         destroy_workqueue(mlxsw_wq);
1751         return err;
1752 }
1753
1754 static void __exit mlxsw_core_module_exit(void)
1755 {
1756         debugfs_remove_recursive(mlxsw_core_dbg_root);
1757         destroy_workqueue(mlxsw_wq);
1758 }
1759
1760 module_init(mlxsw_core_module_init);
1761 module_exit(mlxsw_core_module_exit);
1762
1763 MODULE_LICENSE("Dual BSD/GPL");
1764 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1765 MODULE_DESCRIPTION("Mellanox switch device core driver");