drivers/acpi/cppc_acpi.c
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to the
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI
 * v5.1 and above specifications.
 */
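
/*
 * Illustrative sketch (not part of the driver): one plausible way a
 * CPUfreq backend might drive the interface exported below. Only the
 * cppc_* calls and the struct fields they populate are real; the bare
 * "cpu" number and the choice of requesting nominal_perf are assumed
 * for illustration.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		// Ask for a level within [lowest_perf, highest_perf].
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */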

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide mutually exclusive access to the PCC
 * channel. e.g. when the remote updates the shared region
 * with new data, the reader needs to be protected from
 * other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands.
 */
#define NUM_RETRIES 500

static int send_pcc_cmd(u16 cmd)
{
	int retries, result;
	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	u32 cmd_latency = pcct_ss->latency;

	/* Min time OS should wait before sending next command. */
	udelay(pcc_cmd_delay);

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	result = mbox_send_message(pcc_channel, &cmd);
	if (result < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, result);
		return result;
	}

	/* Wait for a nominal time to let platform process command. */
	udelay(cmd_latency);

	/*
	 * Retry in case the remote processor was too slow to catch up.
	 * Assume failure until the platform flips the CMD COMPLETE bit;
	 * otherwise the non-negative mbox_send_message() return above
	 * would mask a command that never completed.
	 */
	result = -EIO;
	for (retries = NUM_RETRIES; retries > 0; retries--) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			result = 0;
			break;
		}
	}

	mbox_client_txdone(pcc_channel, result);
	return result;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

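/*
 * We know when our transmission is done: send_pcc_cmd() polls the CMD
 * COMPLETE bit itself and then signals the framework through
 * mbox_client_txdone(). Setting knows_txdone tells the mailbox core
 * not to wait on a controller-driven TX-done notification.
 */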
static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

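/*
 * Illustrative example (assumed firmware, not taken from this driver):
 * a _PSD object declaring that four CPUs share performance domain 0
 * with SW_ANY coordination (0xFD). acpi_get_psd() above extracts the
 * five integers into struct acpi_psd_package.
 *
 *	Name (_PSD, Package ()
 *	{
 *		Package () {
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain number
 *			0xFD,	// Coordination type: SW_ANY
 *			4	// Number of processors in domain
 *		}
 *	})
 */
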
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr)
			continue;

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info. */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain. */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info. */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;
		pcc_cmd_delay = cppc_ss->min_turnaround_time;

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}
352
353 /*
354  * An example CPC table looks like the following.
355  *
356  *      Name(_CPC, Package()
357  *                      {
358  *                      17,
359  *                      NumEntries
360  *                      1,
361  *                      // Revision
362  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
363  *                      // Highest Performance
364  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
365  *                      // Nominal Performance
366  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
367  *                      // Lowest Nonlinear Performance
368  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
369  *                      // Lowest Performance
370  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
371  *                      // Guaranteed Performance Register
372  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
373  *                      // Desired Performance Register
374  *                      ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
375  *                      ..
376  *                      ..
377  *                      ..
378  *
379  *              }
380  * Each Register() encodes how to access that specific register.
381  * e.g. a sample PCC entry has the following encoding:
382  *
383  *      Register (
384  *              PCC,
385  *              AddressSpaceKeyword
386  *              8,
387  *              //RegisterBitWidth
388  *              8,
389  *              //RegisterBitOffset
390  *              0x30,
391  *              //RegisterAddress
392  *              9
393  *              //AccessSize (subspace ID)
394  *              0
395  *              )
396  *      }
397  */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through the remaining entries in _CPC. */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs. */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store CPU Logical ID. */
	cpc_ptr->cpu_id = pr->id;

	/* Plug it into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Parse PSD data for this CPU. */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Everything looks okay. */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	/* Don't leave a dangling per-CPU pointer behind on failure. */
	per_cpu(cpc_desc_ptr, pr->id) = NULL;
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	/* Clear the per-CPU slot so nobody dereferences freed memory. */
	per_cpu(cpc_desc_ptr, pr->id) = NULL;
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

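/*
 * Layout note (from the ACPICA definition of struct
 * acpi_pcct_shared_memory): the generic PCC shared region starts with
 * a 4-byte signature, a 2-byte command field and a 2-byte status
 * field, i.e. 8 bytes of header. Hence register addresses within the
 * PCC subspace are taken relative to comm_base_addr + 0x8 below.
 */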
static u64 get_phys_addr(struct cpc_reg *reg)
{
	/* PCC communication addr space begins at byte offset 0x8. */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		return (u64)comm_base_addr + 0x8 + reg->address;
	else
		return reg->address;
}

static void cpc_read(struct cpc_reg *reg, u64 *val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_read_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

static void cpc_write(struct cpc_reg *reg, u64 val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_write_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace. */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	/* A missing reference perf means nominal is the reference. */
	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
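
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the ACPI spec prescribes the ordering lowest <= nominal <= highest
 * on the abstract performance scale, which a consumer may want to
 * sanity-check. Only cppc_get_perf_caps() and the cppc_perf_caps
 * fields are real; "cpu" and the policy around it are assumed.
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps) &&
 *	    caps.lowest_perf <= caps.nominal_perf &&
 *	    caps.nominal_perf <= caps.highest_perf)
 *		pr_debug("CPPC caps for CPU:%d look sane\n", cpu);
 */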

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace. */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Report deltas since the last call; remember the raw counters. */
	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
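
/*
 * Illustrative use of the feedback counters (assumed caller, not part
 * of this file): over the interval between two cppc_get_perf_ctrs()
 * calls, average delivered performance can be estimated as
 *
 *	avg_perf = reference_perf * delta(delivered) / delta(reference)
 *
 * where reference_perf comes from cppc_get_perf_caps() and the deltas
 * are the values this function stores in ->delivered and ->reference.
 * The first call only baselines the prev_* counters.
 *
 *	struct cppc_perf_fb_ctrs fb = {0};
 *	u64 avg_perf;
 *
 *	cppc_get_perf_ctrs(cpu, &fb);	// Baseline the counters.
 *	msleep(100);			// Hypothetical sample interval.
 *	cppc_get_perf_ctrs(cpu, &fb);
 *	avg_perf = ref_perf * fb.delivered / fb.reference;
 */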

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so the remote can see our perf request. */
		if (send_pcc_cmd(CMD_WRITE))
			ret = -EIO;
	}

	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);