/* drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

enum {
        MLX5_BOOT_PAGES         = 1,
        MLX5_INIT_PAGES         = 2,
        MLX5_POST_INIT_PAGES    = 3
};

struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        s32     npages;
        struct work_struct work;
};

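/*
 * Each fw_page tracks one host page handed to the device.  The page is
 * carved into MLX5_NUM_4K_IN_PAGE chunks of MLX5_ADAPTER_PAGE_SIZE;
 * 'bitmask' records which chunks are still free and 'free_count' caches
 * the number of set bits.  Pages with free chunks are kept on
 * dev->priv.free_list.
 */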
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u16                     func_id;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
};

struct mlx5_query_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        u8                      rsvd[8];
};

struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_pages;
};

struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_entries;
        __be64                  pas[0];
};

struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be32                  num_entries;
        u8                      rsvd[4];
        __be64                  pas[0];
};

enum {
        MAX_RECLAIM_TIME_MSECS  = 5000,
        MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

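/* Track a freshly DMA-mapped host page: key it by DMA address in the
 * page_root rbtree and put it on the free list with all chunks available.
 */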
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_left;
                else if (tfp->addr > addr)
                        new = &parent->rb_right;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->func_id = func_id;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

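/* Look up the fw_page that owns the given page-aligned DMA address. */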
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node *tmp = root->rb_node;
        struct fw_page *result = NULL;
        struct fw_page *tfp;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_left;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_right;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

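/* Query how many pages firmware needs for the given stage (boot or init)
 * and which function id the request is for.
 */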
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
        in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);

        return err;
}

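/* Hand out one 4K chunk from the first page on the free list. */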
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
        struct fw_page *fp;
        unsigned n;

        if (list_empty(&dev->priv.free_list))
                return -ENOMEM;

        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug\n");
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

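/* Return a 4K chunk to its page.  Once every chunk is back the page is
 * unmapped, freed and dropped from the tree; when the first chunk comes
 * back the page rejoins the free list.
 */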
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
        if (!fwp) {
                mlx5_core_warn(dev, "page not found\n");
                return;
        }

        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
                rb_erase(&fwp->rb_node, &dev->priv.page_root);
                if (fwp->free_count != 1)
                        list_del(&fwp->list);
                dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                __free_page(fwp->page);
                kfree(fwp);
        } else if (fwp->free_count == 1) {
                list_add(&fwp->list, &dev->priv.free_list);
        }
}

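/* Allocate and DMA-map a new host page, preferring the device's NUMA node,
 * and start tracking it.
 */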
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
        struct page *page;
        u64 addr;
        int err;
        int nid = dev_to_node(&dev->pdev->dev);

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
        addr = dma_map_page(&dev->pdev->dev, page, 0,
                            PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&dev->pdev->dev, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto out_alloc;
        }
        err = insert_page(dev, addr, page, func_id);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                goto out_mapping;
        }

        return 0;

out_mapping:
        dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
        __free_page(page);

        return err;
}

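/* Tell firmware we cannot provide the pages it asked for. */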
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
        in->func_id = cpu_to_be16(func_id);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (!err)
                err = mlx5_cmd_status_to_err(&out.hdr);

        if (err)
                mlx5_core_warn(dev, "page notify failed\n");

        kfree(in);
}

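/* Allocate npages 4K chunks and post them to firmware with a
 * MANAGE_PAGES(GIVE) command.  On failure the chunks are released and,
 * if notify_fail is set, firmware is told that no pages are coming.
 */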
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int inlen;
        u64 addr;
        int err;
        int i;

        inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }
        memset(&out, 0, sizeof(out));

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, func_id);
                        if (err)
                                goto out_4k;

                        goto retry;
                }
                in->pas[i] = cpu_to_be64(addr);
        }

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
        in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_4k;
        }

        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
                               func_id, npages, out.hdr.status);
                goto out_4k;
        }

        dev->priv.fw_pages += npages;
        if (func_id)
                dev->priv.vfs_pages += npages;

        mlx5_core_dbg(dev, "err %d\n", err);

        kvfree(in);
        return 0;

out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id);
        return err;
}

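/* Execute a MANAGE_PAGES(TAKE) command.  If the device is in internal
 * error state, skip firmware and synthesize the reply from the pages we
 * track, so teardown can still make progress.
 */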
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_manage_pages_inbox *in, int in_size,
                             struct mlx5_manage_pages_outbox *out, int out_size)
{
        struct fw_page *fwp;
        struct rb_node *p;
        u32 npages;
        u32 i = 0;

        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
                                                  (u32 *)out, out_size);

        npages = be32_to_cpu(in->num_entries);

        p = rb_first(&dev->priv.page_root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                out->pas[i] = cpu_to_be64(fwp->addr);
                p = rb_next(p);
                i++;
        }

        out->num_entries = cpu_to_be32(i);
        return 0;
}

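/* Ask firmware to return up to npages pages for func_id and free each
 * 4K chunk it hands back.
 */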
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
{
        struct mlx5_manage_pages_inbox   in;
        struct mlx5_manage_pages_outbox *out;
        int num_claimed;
        int outlen;
        u64 addr;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }

        num_claimed = be32_to_cpu(out->num_entries);
        if (num_claimed > npages) {
                mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                               num_claimed, npages);
                err = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }

        if (nclaimed)
                *nclaimed = num_claimed;

        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;

out_free:
        kvfree(out);
        return err;
}

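/* Work handler: a positive request gives pages to firmware, a negative
 * one reclaims them.
 */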
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

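/* Queue a give/reclaim request on the page allocator workqueue.  Uses
 * GFP_ATOMIC since it may be called from atomic context.
 */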
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages)
{
        struct mlx5_pages_req *req;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
}

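/* Query how many pages firmware needs for the boot or init stage and give
 * them to it synchronously.
 */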
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 uninitialized_var(func_id);
        s32 uninitialized_var(npages);
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0);
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

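/* Largest number of page addresses that fits in a reclaim command mailbox
 * of MLX5_BLKS_FOR_RECLAIM_PAGES blocks.
 */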
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               sizeof(struct mlx5_manage_pages_outbox)) /
               FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

        return ret;
}

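/* Reclaim all pages held by firmware, extending the deadline as long as
 * progress is made and giving up after MAX_RECLAIM_TIME_MSECS without any.
 */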
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
        int nclaimed = 0;
        int err = 0;

        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
                        err = reclaim_pages(dev, fwp->func_id,
                                            optimal_reclaimed_pages(),
                                            &nclaimed);

                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
                                return err;
                        }
                        if (nclaimed)
                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        } while (p);

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
        WARN(dev->priv.vfs_pages,
             "VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.vfs_pages);

        return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        dev->priv.page_root = RB_ROOT;
        INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
        /* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        destroy_workqueue(dev->priv.pg_wq);
}

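/* Wait for all pages given on behalf of VFs to be returned, extending the
 * timeout whenever the outstanding count drops.
 */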
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
        int prev_vfs_pages = dev->priv.vfs_pages;

        mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
                      dev->priv.name);
        while (dev->priv.vfs_pages) {
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
                        return -ETIMEDOUT;
                }
                if (dev->priv.vfs_pages < prev_vfs_pages) {
                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
                        prev_vfs_pages = dev->priv.vfs_pages;
                }
                msleep(50);
        }

        mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
        return 0;
}