drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = kfd_ioctl,
        .compat_ioctl = kfd_ioctl,
        .open = kfd_open,
        .mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;

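/*
 * Register the /dev/kfd character device: allocate a dynamic major,
 * create the "kfd" class and a single device node (minor 0). On any
 * failure, the steps already taken are rolled back in reverse order.
 */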
int kfd_chardev_init(void)
{
        int err = 0;

        kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
        err = kfd_char_dev_major;
        if (err < 0)
                goto err_register_chrdev;

        kfd_class = class_create(THIS_MODULE, kfd_dev_name);
        err = PTR_ERR(kfd_class);
        if (IS_ERR(kfd_class))
                goto err_class_create;

        kfd_device = device_create(kfd_class, NULL,
                                        MKDEV(kfd_char_dev_major, 0),
                                        NULL, kfd_dev_name);
        err = PTR_ERR(kfd_device);
        if (IS_ERR(kfd_device))
                goto err_device_create;

        return 0;

err_device_create:
        class_destroy(kfd_class);
err_class_create:
        unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
        return err;
}

void kfd_chardev_exit(void)
{
        device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
        class_destroy(kfd_class);
        unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
        return kfd_device;
}

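/*
 * Open handler for /dev/kfd: only minor 0 exists, and 32-bit (compat)
 * processes are rejected. A kfd_process is created for the calling
 * task and its GPU apertures are initialized.
 */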
static int kfd_open(struct inode *inode, struct file *filep)
{
        struct kfd_process *process;
        bool is_32bit_user_mode;

        if (iminor(inode) != 0)
                return -ENODEV;

        is_32bit_user_mode = is_compat_task();

        if (is_32bit_user_mode) {
                dev_warn(kfd_device,
                        "Process %d (32-bit) failed to open /dev/kfd\n"
                        "32-bit processes are not supported by amdkfd\n",
                        current->pid);
                return -EPERM;
        }

        process = kfd_create_process(current);
        if (IS_ERR(process))
                return PTR_ERR(process);

        process->is_32bit_user_mode = is_32bit_user_mode;

        dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                process->pasid, process->is_32bit_user_mode);

        kfd_init_apertures(process);

        return 0;
}

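/* Report the KFD ioctl interface version to user space. */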
static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
                                        void __user *arg)
{
        struct kfd_ioctl_get_version_args args;
        int err = 0;

        args.major_version = KFD_IOCTL_MAJOR_VERSION;
        args.minor_version = KFD_IOCTL_MINOR_VERSION;

        if (copy_to_user(arg, &args, sizeof(args)))
                err = -EFAULT;

        return err;
}

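/*
 * Validate the user-supplied queue arguments (percentage, priority,
 * ring size and user-space pointers) and translate them into a
 * queue_properties structure. Only compute queues are accepted, in
 * either PM4 or AQL format.
 */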
static int set_queue_properties_from_user(struct queue_properties *q_properties,
                                struct kfd_ioctl_create_queue_args *args)
{
        if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }

        if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
                pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
                return -EINVAL;
        }

        if ((args->ring_base_address) &&
                (!access_ok(VERIFY_WRITE,
                        (const void __user *) args->ring_base_address,
                        sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }

        if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
                pr_err("kfd: ring size must be a power of 2 or 0\n");
                return -EINVAL;
        }

        if (!access_ok(VERIFY_WRITE,
                        (const void __user *) args->read_pointer_address,
                        sizeof(uint32_t))) {
                pr_err("kfd: can't access read pointer\n");
                return -EFAULT;
        }

        if (!access_ok(VERIFY_WRITE,
                        (const void __user *) args->write_pointer_address,
                        sizeof(uint32_t))) {
                pr_err("kfd: can't access write pointer\n");
                return -EFAULT;
        }

        q_properties->is_interop = false;
        q_properties->queue_percent = args->queue_percentage;
        q_properties->priority = args->queue_priority;
        q_properties->queue_address = args->ring_base_address;
        q_properties->queue_size = args->ring_size;
        q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
        q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
        if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
                args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
                q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
        else
                return -ENOTSUPP;

        if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
                q_properties->format = KFD_QUEUE_FORMAT_AQL;
        else
                q_properties->format = KFD_QUEUE_FORMAT_PM4;

        pr_debug("Queue Percentage (%d, %d)\n",
                        q_properties->queue_percent, args->queue_percentage);

        pr_debug("Queue Priority (%d, %d)\n",
                        q_properties->priority, args->queue_priority);

        pr_debug("Queue Address (0x%llX, 0x%llX)\n",
                        q_properties->queue_address, args->ring_base_address);

        pr_debug("Queue Size (0x%llX, %u)\n",
                        q_properties->queue_size, args->ring_size);

        pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
                        (uint64_t) q_properties->read_ptr,
                        (uint64_t) q_properties->write_ptr);

        pr_debug("Queue Format (%d)\n", q_properties->format);

        return 0;
}

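/*
 * Create a compute queue on the requested GPU: validate the arguments,
 * bind the process to the device and hand the request to the process
 * queue manager. On success the new queue id and a doorbell offset
 * (derived from the gpu_id) are copied back to user space.
 */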
static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
                                        void __user *arg)
{
        struct kfd_ioctl_create_queue_args args;
        struct kfd_dev *dev;
        int err = 0;
        unsigned int queue_id;
        struct kfd_process_device *pdd;
        struct queue_properties q_properties;

        memset(&q_properties, 0, sizeof(struct queue_properties));

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        pr_debug("kfd: creating queue ioctl\n");

        err = set_queue_properties_from_user(&q_properties, &args);
        if (err)
                return err;

        dev = kfd_device_by_id(args.gpu_id);
        if (dev == NULL)
                return -EINVAL;

        mutex_lock(&p->mutex);

        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
                err = PTR_ERR(pdd);
                goto err_bind_process;
        }

        pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
                        p->pasid,
                        dev->id);

        err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
                                KFD_QUEUE_TYPE_COMPUTE, &queue_id);
        if (err != 0)
                goto err_create_queue;

        args.queue_id = queue_id;

        /* Return gpu_id as doorbell offset for mmap usage */
        args.doorbell_offset = args.gpu_id << PAGE_SHIFT;

        if (copy_to_user(arg, &args, sizeof(args))) {
                err = -EFAULT;
                goto err_copy_args_out;
        }

        mutex_unlock(&p->mutex);

        pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);

        pr_debug("ring buffer address == 0x%016llX\n",
                        args.ring_base_address);

        pr_debug("read ptr address    == 0x%016llX\n",
                        args.read_pointer_address);

        pr_debug("write ptr address   == 0x%016llX\n",
                        args.write_pointer_address);

        return 0;

err_copy_args_out:
        pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
        mutex_unlock(&p->mutex);
        return err;
}

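/* Destroy a previously created queue identified by queue_id. */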
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
                                        void __user *arg)
{
        int retval;
        struct kfd_ioctl_destroy_queue_args args;

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        pr_debug("kfd: destroying queue id %d for PASID %d\n",
                                args.queue_id,
                                p->pasid);

        mutex_lock(&p->mutex);

        retval = pqm_destroy_queue(&p->pqm, args.queue_id);

        mutex_unlock(&p->mutex);
        return retval;
}

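/*
 * Update the properties (ring address, size, percentage, priority) of
 * an existing queue after re-validating the user-supplied values.
 */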
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
                                        void __user *arg)
{
        int retval;
        struct kfd_ioctl_update_queue_args args;
        struct queue_properties properties;

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }

        if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
                pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
                return -EINVAL;
        }

        if ((args.ring_base_address) &&
                (!access_ok(VERIFY_WRITE,
                        (const void __user *) args.ring_base_address,
                        sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }

        if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
                pr_err("kfd: ring size must be a power of 2 or 0\n");
                return -EINVAL;
        }

        properties.queue_address = args.ring_base_address;
        properties.queue_size = args.ring_size;
        properties.queue_percent = args.queue_percentage;
        properties.priority = args.queue_priority;

        pr_debug("kfd: updating queue id %d for PASID %d\n",
                        args.queue_id, p->pasid);

        mutex_lock(&p->mutex);

        retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);

        mutex_unlock(&p->mutex);

        return retval;
}

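/*
 * Set the default and alternate cache policies (coherent or
 * non-coherent) for the process on the given GPU, along with the
 * alternate aperture range passed down to the device queue manager.
 */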
static long kfd_ioctl_set_memory_policy(struct file *filep,
                                struct kfd_process *p, void __user *arg)
{
        struct kfd_ioctl_set_memory_policy_args args;
        struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
        enum cache_policy default_policy, alternate_policy;

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
            && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }

        if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
            && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }

        dev = kfd_device_by_id(args.gpu_id);
        if (dev == NULL)
                return -EINVAL;

        mutex_lock(&p->mutex);

        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
                err = PTR_ERR(pdd);
                goto out;
        }

        default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                         ? cache_policy_coherent : cache_policy_noncoherent;

        alternate_policy =
                (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;

        if (!dev->dqm->set_cache_memory_policy(dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
                                (void __user *)args.alternate_aperture_base,
                                args.alternate_aperture_size))
                err = -EINVAL;

out:
        mutex_unlock(&p->mutex);

        return err;
}

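/*
 * Return a set of timestamps: the GPU clock counter read through KGD,
 * the raw monotonic CPU time and the monotonic boot time, all
 * expressed in nanoseconds (hence the reported 1 GHz system clock
 * frequency).
 */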
static long kfd_ioctl_get_clock_counters(struct file *filep,
                                struct kfd_process *p, void __user *arg)
{
        struct kfd_ioctl_get_clock_counters_args args;
        struct kfd_dev *dev;
        struct timespec time;

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        dev = kfd_device_by_id(args.gpu_id);
        if (dev == NULL)
                return -EINVAL;

        /* Reading GPU clock counter from KGD */
        args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);

        /* No access to rdtsc. Using raw monotonic time */
        getrawmonotonic(&time);
        args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);

        get_monotonic_boottime(&time);
        args.system_clock_counter = (uint64_t)timespec_to_ns(&time);

        /* Since the counter is in nano-seconds we use 1GHz frequency */
        args.system_clock_freq = 1000000000;

        if (copy_to_user(arg, &args, sizeof(args)))
                return -EFAULT;

        return 0;
}

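/*
 * Report the LDS, GPUVM and scratch aperture ranges of every device
 * the process is bound to, up to NUM_OF_SUPPORTED_GPUS entries.
 */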
static int kfd_ioctl_get_process_apertures(struct file *filp,
                                struct kfd_process *p, void __user *arg)
{
        struct kfd_ioctl_get_process_apertures_args args;
        struct kfd_process_device_apertures *pAperture;
        struct kfd_process_device *pdd;

        dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

        if (copy_from_user(&args, arg, sizeof(args)))
                return -EFAULT;

        args.num_of_nodes = 0;

        mutex_lock(&p->mutex);

        /* if the process-device list isn't empty */
        if (kfd_has_process_device_data(p)) {
                /* Run over all pdd of the process */
                pdd = kfd_get_first_process_device_data(p);
                do {
                        pAperture = &args.process_apertures[args.num_of_nodes];
                        pAperture->gpu_id = pdd->dev->id;
                        pAperture->lds_base = pdd->lds_base;
                        pAperture->lds_limit = pdd->lds_limit;
                        pAperture->gpuvm_base = pdd->gpuvm_base;
                        pAperture->gpuvm_limit = pdd->gpuvm_limit;
                        pAperture->scratch_base = pdd->scratch_base;
                        pAperture->scratch_limit = pdd->scratch_limit;

                        dev_dbg(kfd_device,
                                "node id %u\n", args.num_of_nodes);
                        dev_dbg(kfd_device,
                                "gpu id %u\n", pdd->dev->id);
                        dev_dbg(kfd_device,
                                "lds_base %llX\n", pdd->lds_base);
                        dev_dbg(kfd_device,
                                "lds_limit %llX\n", pdd->lds_limit);
                        dev_dbg(kfd_device,
                                "gpuvm_base %llX\n", pdd->gpuvm_base);
                        dev_dbg(kfd_device,
                                "gpuvm_limit %llX\n", pdd->gpuvm_limit);
                        dev_dbg(kfd_device,
                                "scratch_base %llX\n", pdd->scratch_base);
                        dev_dbg(kfd_device,
                                "scratch_limit %llX\n", pdd->scratch_limit);

                        args.num_of_nodes++;
                } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
                                (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
        }

        mutex_unlock(&p->mutex);

        if (copy_to_user(arg, &args, sizeof(args)))
                return -EFAULT;

        return 0;
}

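/*
 * Common ioctl entry point (also used for compat_ioctl): look up the
 * calling kfd_process and dispatch to the per-command handler.
 */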
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct kfd_process *process;
        long err = -EINVAL;

        dev_dbg(kfd_device,
                "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
                cmd, _IOC_NR(cmd), arg);

        process = kfd_get_process(current);
        if (IS_ERR(process))
                return PTR_ERR(process);

        switch (cmd) {
        case KFD_IOC_GET_VERSION:
                err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
                break;

        case KFD_IOC_CREATE_QUEUE:
                err = kfd_ioctl_create_queue(filep, process,
                                                (void __user *)arg);
                break;

        case KFD_IOC_DESTROY_QUEUE:
                err = kfd_ioctl_destroy_queue(filep, process,
                                                (void __user *)arg);
                break;

        case KFD_IOC_SET_MEMORY_POLICY:
                err = kfd_ioctl_set_memory_policy(filep, process,
                                                (void __user *)arg);
                break;

        case KFD_IOC_GET_CLOCK_COUNTERS:
                err = kfd_ioctl_get_clock_counters(filep, process,
                                                (void __user *)arg);
                break;

        case KFD_IOC_GET_PROCESS_APERTURES:
                err = kfd_ioctl_get_process_apertures(filep, process,
                                                (void __user *)arg);
                break;

        case KFD_IOC_UPDATE_QUEUE:
                err = kfd_ioctl_update_queue(filep, process,
                                                (void __user *)arg);
                break;

        default:
                dev_err(kfd_device,
                        "unknown ioctl cmd 0x%x, arg 0x%lx\n",
                        cmd, arg);
                err = -EINVAL;
                break;
        }

        if (err < 0)
                dev_err(kfd_device,
                        "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
                        err, cmd, _IOC_NR(cmd));

        return err;
}

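/*
 * mmap handler: map the doorbell pages of the calling process via
 * kfd_doorbell_mmap().
 */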
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct kfd_process *process;

        process = kfd_get_process(current);
        if (IS_ERR(process))
                return PTR_ERR(process);

        return kfd_doorbell_mmap(process, vma);
}