Source: cascardo/linux.git — drivers/gpu/drm/amd/amdkfd/kfd_device.c
(Note: the scraped page header also carried an unrelated commit title, "regulator: rt5033-regulator: Use regulator_nodes/of_match in the descriptor"; it does not describe this file.)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/amd-iommu.h>
24 #include <linux/bsearch.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include "kfd_priv.h"
28 #include "kfd_device_queue_manager.h"
29
30 #define MQD_SIZE_ALIGNED 768
31
/* ASIC properties shared by every Kaveri-family device id listed below. */
static const struct kfd_device_info kaveri_device_info = {
	.max_pasid_bits = 16,
	/* Each interrupt-ring entry is four dwords on this ASIC family. */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.mqd_size_aligned = MQD_SIZE_ALIGNED
};
37
/* One row of the supported-device table: maps a PCI device id to the
 * kfd_device_info describing that ASIC.
 */
struct kfd_deviceid {
	unsigned short did;				/* PCI device id */
	const struct kfd_device_info *device_info;	/* never NULL in the table */
};
42
/* Please keep this sorted by increasing device id. */
static const struct kfd_device_info *lookup-table of PCI ids handled by KFD;
/* (all current entries are Kaveri variants sharing kaveri_device_info) */
static const struct kfd_deviceid supported_devices[] = {
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
};
68
69 static const struct kfd_device_info *lookup_device_info(unsigned short did)
70 {
71         size_t i;
72
73         for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
74                 if (supported_devices[i].did == did) {
75                         BUG_ON(supported_devices[i].device_info == NULL);
76                         return supported_devices[i].device_info;
77                 }
78         }
79
80         return NULL;
81 }
82
83 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
84 {
85         struct kfd_dev *kfd;
86
87         const struct kfd_device_info *device_info =
88                                         lookup_device_info(pdev->device);
89
90         if (!device_info)
91                 return NULL;
92
93         kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
94         if (!kfd)
95                 return NULL;
96
97         kfd->kgd = kgd;
98         kfd->device_info = device_info;
99         kfd->pdev = pdev;
100         kfd->init_complete = false;
101
102         return kfd;
103 }
104
/*
 * Verify the AMD IOMMUv2 supports ATS, PRI and PASID for this device,
 * compute the device's PASID limit, and bind the device to the IOMMU.
 * Returns true on success; on any failure logs an error, undoes any
 * partial IOMMU binding, and returns false.
 */
static bool device_iommu_pasid_init(struct kfd_dev *kfd)
{
	/* All three capabilities are required for KFD's shared-VM model. */
	const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

	struct amd_iommu_device_info iommu_info;
	unsigned int pasid_limit;
	int err;

	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err < 0) {
		dev_err(kfd_device,
			"error getting iommu info. is the iommu enabled?\n");
		return false;
	}

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
		return false;
	}

	/* Limit PASIDs to what both the device info and the IOMMU allow. */
	pasid_limit = min_t(unsigned int,
			(unsigned int)1 << kfd->device_info->max_pasid_bits,
			iommu_info.max_pasids);
	/*
	 * last pasid is used for kernel queues doorbells
	 * in the future the last pasid might be used for a kernel thread.
	 */
	pasid_limit = min_t(unsigned int,
				pasid_limit,
				kfd->doorbell_process_limit - 1);

	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
	if (err < 0) {
		dev_err(kfd_device, "error initializing iommu device\n");
		return false;
	}

	if (!kfd_set_pasid_limit(pasid_limit)) {
		dev_err(kfd_device, "error setting pasid limit\n");
		/* Roll back the IOMMU binding done just above. */
		amd_iommu_free_device(kfd->pdev);
		return false;
	}

	return true;
}
155
/*
 * IOMMU invalidate-context callback: when the IOMMU tears down a PASID
 * context for this PCI device, unbind the corresponding process from the
 * KFD device. A NULL lookup (device not tracked) is silently ignored.
 */
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);

	if (!dev)
		return;

	kfd_unbind_process_from_device(dev, pasid);
}
163
164 bool kgd2kfd_device_init(struct kfd_dev *kfd,
165                          const struct kgd2kfd_shared_resources *gpu_resources)
166 {
167         unsigned int size;
168
169         kfd->shared_resources = *gpu_resources;
170
171         /* calculate max size of mqds needed for queues */
172         size = max_num_of_processes *
173                 max_num_of_queues_per_process *
174                 kfd->device_info->mqd_size_aligned;
175
176         /* add another 512KB for all other allocations on gart */
177         size += 512 * 1024;
178
179         if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
180                 dev_err(kfd_device,
181                         "Error initializing sa manager for device (%x:%x)\n",
182                         kfd->pdev->vendor, kfd->pdev->device);
183                 goto out;
184         }
185
186         kfd_doorbell_init(kfd);
187
188         if (kfd_topology_add_device(kfd) != 0) {
189                 dev_err(kfd_device,
190                         "Error adding device (%x:%x) to topology\n",
191                         kfd->pdev->vendor, kfd->pdev->device);
192                 goto kfd_topology_add_device_error;
193         }
194
195         if (kfd_interrupt_init(kfd)) {
196                 dev_err(kfd_device,
197                         "Error initializing interrupts for device (%x:%x)\n",
198                         kfd->pdev->vendor, kfd->pdev->device);
199                 goto kfd_interrupt_error;
200         }
201
202         if (!device_iommu_pasid_init(kfd)) {
203                 dev_err(kfd_device,
204                         "Error initializing iommuv2 for device (%x:%x)\n",
205                         kfd->pdev->vendor, kfd->pdev->device);
206                 goto device_iommu_pasid_error;
207         }
208         amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
209                                                 iommu_pasid_shutdown_callback);
210
211         kfd->dqm = device_queue_manager_init(kfd);
212         if (!kfd->dqm) {
213                 dev_err(kfd_device,
214                         "Error initializing queue manager for device (%x:%x)\n",
215                         kfd->pdev->vendor, kfd->pdev->device);
216                 goto device_queue_manager_error;
217         }
218
219         if (kfd->dqm->start(kfd->dqm) != 0) {
220                 dev_err(kfd_device,
221                         "Error starting queuen manager for device (%x:%x)\n",
222                         kfd->pdev->vendor, kfd->pdev->device);
223                 goto dqm_start_error;
224         }
225
226         kfd->init_complete = true;
227         dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
228                  kfd->pdev->device);
229
230         pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
231                 sched_policy);
232
233         goto out;
234
235 dqm_start_error:
236         device_queue_manager_uninit(kfd->dqm);
237 device_queue_manager_error:
238         amd_iommu_free_device(kfd->pdev);
239 device_iommu_pasid_error:
240         kfd_interrupt_exit(kfd);
241 kfd_interrupt_error:
242         kfd_topology_remove_device(kfd);
243 kfd_topology_add_device_error:
244         kfd2kgd->fini_sa_manager(kfd->kgd);
245         dev_err(kfd_device,
246                 "device (%x:%x) NOT added due to errors\n",
247                 kfd->pdev->vendor, kfd->pdev->device);
248 out:
249         return kfd->init_complete;
250 }
251
/*
 * Tear down a KFD device and free it. The teardown steps mirror
 * kgd2kfd_device_init() in reverse order and run only if init fully
 * completed; a probed-but-never-initialized device is simply freed.
 */
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		amd_iommu_free_device(kfd->pdev);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
	}

	kfree(kfd);
}
263
264 void kgd2kfd_suspend(struct kfd_dev *kfd)
265 {
266         BUG_ON(kfd == NULL);
267
268         if (kfd->init_complete) {
269                 kfd->dqm->stop(kfd->dqm);
270                 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
271                 amd_iommu_free_device(kfd->pdev);
272         }
273 }
274
275 int kgd2kfd_resume(struct kfd_dev *kfd)
276 {
277         unsigned int pasid_limit;
278         int err;
279
280         BUG_ON(kfd == NULL);
281
282         pasid_limit = kfd_get_pasid_limit();
283
284         if (kfd->init_complete) {
285                 err = amd_iommu_init_device(kfd->pdev, pasid_limit);
286                 if (err < 0)
287                         return -ENXIO;
288                 amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
289                                                 iommu_pasid_shutdown_callback);
290                 kfd->dqm->start(kfd->dqm);
291         }
292
293         return 0;
294 }
295
296 /* This is called directly from KGD at ISR. */
297 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
298 {
299         if (kfd->init_complete) {
300                 spin_lock(&kfd->interrupt_lock);
301
302                 if (kfd->interrupts_active
303                     && enqueue_ih_ring_entry(kfd, ih_ring_entry))
304                         schedule_work(&kfd->interrupt_work);
305
306                 spin_unlock(&kfd->interrupt_lock);
307         }
308 }