thermal: exynos: remove redundant threshold_code checks from exynos_tmu_initialize()
[cascardo/linux.git] / drivers / thermal / samsung / exynos_tmu.c
1 /*
2  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3  *
4  *  Copyright (C) 2011 Samsung Electronics
5  *  Donggeun Kim <dg77.kim@samsung.com>
6  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 #include <linux/clk.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/of_address.h>
30 #include <linux/of_irq.h>
31 #include <linux/platform_device.h>
32 #include <linux/regulator/consumer.h>
33
34 #include "exynos_thermal_common.h"
35 #include "exynos_tmu.h"
36 #include "exynos_tmu_data.h"
37
38 /**
39  * struct exynos_tmu_data : A structure to hold the private data of the TMU
40  *	driver
41  * @id: identifier of this instance of the TMU controller.
42  * @pdata: pointer to the TMU platform/configuration data
43  * @base: base address of this instance of the TMU controller.
44  * @base_second: base address of the common registers of the TMU controller.
45  * @irq: irq number of the TMU controller.
46  * @soc: id of the SoC type.
47  * @irq_work: pointer to the irq work structure.
48  * @lock: lock to implement synchronization.
49  * @clk: pointer to the clock structure.
50  * @clk_sec: pointer to the clock structure for accessing the base_second.
51  * @temp_error1: fused value of the first point trim.
52  * @temp_error2: fused value of the second point trim.
53  * @regulator: pointer to the TMU regulator structure.
54  * @reg_conf: pointer to structure to register with core thermal.
55  */
56 struct exynos_tmu_data {
57         int id;
58         struct exynos_tmu_platform_data *pdata;
59         void __iomem *base;
60         void __iomem *base_second;
61         int irq;
62         enum soc_type soc;
63         struct work_struct irq_work;
64         struct mutex lock;
65         struct clk *clk, *clk_sec;
66         u8 temp_error1, temp_error2;
67         struct regulator *regulator;
68         struct thermal_sensor_conf *reg_conf;
69 };
70
71 /*
72  * The TMU represents temperatures as mapped temperature codes.
73  * A temperature is converted to a code differently depending on the calibration type.
74  */
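/*
 * Illustrative example (assuming the usual 25 degC and 85 degC trim points
 * set in the platform data): with two-point trimming, if the fused code at
 * 25 degC is temp_error1 = 50 and the code at 85 degC is temp_error2 = 110,
 * then temp = 55 degC maps to
 *	(55 - 25) * (110 - 50) / (85 - 25) + 50 = 80.
 * The real trim codes are read from the TRIMINFO efuse and vary per chip.
 */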
75 static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
76 {
77         struct exynos_tmu_platform_data *pdata = data->pdata;
78         int temp_code;
79
80         /* On exynos4210, temp should range between 25 and 125 */
81         if (data->soc == SOC_ARCH_EXYNOS4210 &&
82             (temp < 25 || temp > 125)) {
83                 temp_code = -EINVAL;
84                 goto out;
85         }
86
87         switch (pdata->cal_type) {
88         case TYPE_TWO_POINT_TRIMMING:
89                 temp_code = (temp - pdata->first_point_trim) *
90                         (data->temp_error2 - data->temp_error1) /
91                         (pdata->second_point_trim - pdata->first_point_trim) +
92                         data->temp_error1;
93                 break;
94         case TYPE_ONE_POINT_TRIMMING:
95                 temp_code = temp + data->temp_error1 - pdata->first_point_trim;
96                 break;
97         default:
98                 temp_code = temp + pdata->default_temp_offset;
99                 break;
100         }
101 out:
102         return temp_code;
103 }
104
105 /*
106  * Calculate a temperature value from a temperature code.
107  * The unit of the temperature is degrees Celsius.
108  */
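/*
 * This is the inverse of temp_to_code(); for the example above, code 80
 * maps back to (80 - 50) * (85 - 25) / (110 - 50) + 25 = 55 degC.
 */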
109 static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
110 {
111         struct exynos_tmu_platform_data *pdata = data->pdata;
112         int temp;
113
114         /* On exynos4210, temp_code should range between 75 and 175 */
115         if (data->soc == SOC_ARCH_EXYNOS4210 &&
116             (temp_code < 75 || temp_code > 175)) {
117                 temp = -ENODATA;
118                 goto out;
119         }
120
121         switch (pdata->cal_type) {
122         case TYPE_TWO_POINT_TRIMMING:
123                 temp = (temp_code - data->temp_error1) *
124                         (pdata->second_point_trim - pdata->first_point_trim) /
125                         (data->temp_error2 - data->temp_error1) +
126                         pdata->first_point_trim;
127                 break;
128         case TYPE_ONE_POINT_TRIMMING:
129                 temp = temp_code - data->temp_error1 + pdata->first_point_trim;
130                 break;
131         default:
132                 temp = temp_code - pdata->default_temp_offset;
133                 break;
134         }
135 out:
136         return temp;
137 }
138
139 static int exynos_tmu_initialize(struct platform_device *pdev)
140 {
141         struct exynos_tmu_data *data = platform_get_drvdata(pdev);
142         struct exynos_tmu_platform_data *pdata = data->pdata;
143         const struct exynos_tmu_registers *reg = pdata->registers;
144         unsigned int status, trim_info = 0, con;
145         unsigned int rising_threshold = 0, falling_threshold = 0;
146         int ret = 0, threshold_code, i, trigger_levs = 0;
147
148         mutex_lock(&data->lock);
149         clk_enable(data->clk);
150         if (!IS_ERR(data->clk_sec))
151                 clk_enable(data->clk_sec);
152
153         if (TMU_SUPPORTS(pdata, READY_STATUS)) {
154                 status = readb(data->base + reg->tmu_status);
155                 if (!status) {
156                         ret = -EBUSY;
157                         goto out;
158                 }
159         }
160
161         if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
162                 __raw_writel(1, data->base + reg->triminfo_ctrl);
163
164         /* Save trimming info in order to perform calibration */
165         if (data->soc == SOC_ARCH_EXYNOS5440) {
166                 /*
167                  * On the exynos5440 SoC the triminfo value is swapped between
168                  * TMU0 and TMU2, so the logic below is needed.
169                  */
170                 switch (data->id) {
171                 case 0:
172                         trim_info = readl(data->base +
173                         EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
174                         break;
175                 case 1:
176                         trim_info = readl(data->base + reg->triminfo_data);
177                         break;
178                 case 2:
179                         trim_info = readl(data->base -
180                         EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
181                 }
182         } else {
183                 /* On exynos5420 the triminfo register is in the shared space */
184                 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
185                         trim_info = readl(data->base_second +
186                                                         reg->triminfo_data);
187                 else
188                         trim_info = readl(data->base + reg->triminfo_data);
189         }
190         data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
191         data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
192                                 EXYNOS_TMU_TEMP_MASK);
193
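        /*
         * Fall back to the default efuse values from the platform data when
         * the fused first-point value is missing or out of range, or when the
         * fused second-point value is missing.
         */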
194         if (!data->temp_error1 ||
195                 (pdata->min_efuse_value > data->temp_error1) ||
196                 (data->temp_error1 > pdata->max_efuse_value))
197                 data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
198
199         if (!data->temp_error2)
200                 data->temp_error2 =
201                         (pdata->efuse_value >> reg->triminfo_85_shift) &
202                         EXYNOS_TMU_TEMP_MASK;
203
204         for (i = 0; i < pdata->max_trigger_level; i++) {
205                 if (!pdata->trigger_levels[i])
206                         continue;
207
208                 /* Count trigger levels except the HW trip */
209                 if (pdata->trigger_type[i] != HW_TRIP)
210                         trigger_levs++;
211         }
212
213         rising_threshold = readl(data->base + reg->threshold_th0);
214
215         if (data->soc == SOC_ARCH_EXYNOS4210) {
216                 /* Write temperature code for threshold */
217                 threshold_code = temp_to_code(data, pdata->threshold);
218                 writeb(threshold_code,
219                         data->base + reg->threshold_temp);
220                 for (i = 0; i < trigger_levs; i++)
221                         writeb(pdata->trigger_levels[i], data->base +
222                         reg->threshold_th0 + i * sizeof(reg->threshold_th0));
223
224                 writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
225         } else {
226                 /* Write temperature code for rising and falling threshold */
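                /*
                 * Each trigger level occupies one byte of the threshold
                 * register: level i is programmed at bits [8*i+7:8*i].
                 */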
227                 for (i = 0;
228                 i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
229                         threshold_code = temp_to_code(data,
230                                                 pdata->trigger_levels[i]);
231                         rising_threshold &= ~(0xff << 8 * i);
232                         rising_threshold |= threshold_code << 8 * i;
233                         if (pdata->threshold_falling) {
234                                 threshold_code = temp_to_code(data,
235                                                 pdata->trigger_levels[i] -
236                                                 pdata->threshold_falling);
237                                 falling_threshold |= threshold_code << 8 * i;
238                         }
239                 }
240
241                 writel(rising_threshold,
242                                 data->base + reg->threshold_th0);
243                 writel(falling_threshold,
244                                 data->base + reg->threshold_th1);
245
246                 writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
247                         (reg->intclr_fall_mask << reg->intclr_fall_shift),
248                                 data->base + reg->tmu_intclear);
249
250                 /* Program the highest trigger level if it is configured as a HW trip */
251                 i = pdata->max_trigger_level - 1;
252                 if (pdata->trigger_levels[i] &&
253                                 (pdata->trigger_type[i] == HW_TRIP)) {
254                         threshold_code = temp_to_code(data,
255                                                 pdata->trigger_levels[i]);
256                         if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
257                                 /* levels 1-4 are programmed in the th0 reg */
258                                 rising_threshold &= ~(0xff << 8 * i);
259                                 rising_threshold |= threshold_code << 8 * i;
260                                 writel(rising_threshold,
261                                         data->base + reg->threshold_th0);
262                         } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
263                                 /* the 5th level is programmed in the th2 reg */
264                                 rising_threshold =
265                                 threshold_code << reg->threshold_th3_l0_shift;
266                                 writel(rising_threshold,
267                                         data->base + reg->threshold_th2);
268                         }
269                         con = readl(data->base + reg->tmu_ctrl);
270                         con |= (1 << reg->therm_trip_en_shift);
271                         writel(con, data->base + reg->tmu_ctrl);
272                 }
273         }
274         /* Clear the PMIN in the common TMU register */
275         if (reg->tmu_pmin && !data->id)
276                 writel(0, data->base_second + reg->tmu_pmin);
277 out:
278         clk_disable(data->clk);
279         mutex_unlock(&data->lock);
280         if (!IS_ERR(data->clk_sec))
281                 clk_disable(data->clk_sec);
282
283         return ret;
284 }
285
286 static void exynos_tmu_control(struct platform_device *pdev, bool on)
287 {
288         struct exynos_tmu_data *data = platform_get_drvdata(pdev);
289         struct exynos_tmu_platform_data *pdata = data->pdata;
290         const struct exynos_tmu_registers *reg = pdata->registers;
291         unsigned int con, interrupt_en;
292
293         mutex_lock(&data->lock);
294         clk_enable(data->clk);
295
296         con = readl(data->base + reg->tmu_ctrl);
297
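        /*
         * Program the analog parameters (test mux, buffer reference voltage,
         * gain and noise-cancel mode) from the platform data while keeping
         * the remaining control register bits intact.
         */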
298         if (pdata->test_mux)
299                 con |= (pdata->test_mux << reg->test_mux_addr_shift);
300
301         if (pdata->reference_voltage) {
302                 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
303                 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
304         }
305
306         if (pdata->gain) {
307                 con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
308                 con |= (pdata->gain << reg->buf_slope_sel_shift);
309         }
310
311         if (pdata->noise_cancel_mode) {
312                 con &= ~(reg->therm_trip_mode_mask <<
313                                         reg->therm_trip_mode_shift);
314                 con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
315         }
316
317         if (on) {
318                 con |= (1 << reg->core_en_shift);
319                 interrupt_en =
320                         pdata->trigger_enable[3] << reg->inten_rise3_shift |
321                         pdata->trigger_enable[2] << reg->inten_rise2_shift |
322                         pdata->trigger_enable[1] << reg->inten_rise1_shift |
323                         pdata->trigger_enable[0] << reg->inten_rise0_shift;
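                /*
                 * Falling-threshold interrupts, when supported, reuse the
                 * rising enables shifted into the falling field.
                 */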
324                 if (TMU_SUPPORTS(pdata, FALLING_TRIP))
325                         interrupt_en |=
326                                 interrupt_en << reg->inten_fall0_shift;
327         } else {
328                 con &= ~(1 << reg->core_en_shift);
329                 interrupt_en = 0; /* Disable all interrupts */
330         }
331         writel(interrupt_en, data->base + reg->tmu_inten);
332         writel(con, data->base + reg->tmu_ctrl);
333
334         clk_disable(data->clk);
335         mutex_unlock(&data->lock);
336 }
337
338 static int exynos_tmu_read(struct exynos_tmu_data *data)
339 {
340         struct exynos_tmu_platform_data *pdata = data->pdata;
341         const struct exynos_tmu_registers *reg = pdata->registers;
342         u8 temp_code;
343         int temp;
344
345         mutex_lock(&data->lock);
346         clk_enable(data->clk);
347
348         temp_code = readb(data->base + reg->tmu_cur_temp);
349         temp = code_to_temp(data, temp_code);
350
351         clk_disable(data->clk);
352         mutex_unlock(&data->lock);
353
354         return temp;
355 }
356
357 #ifdef CONFIG_THERMAL_EMULATION
358 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
359 {
360         struct exynos_tmu_data *data = drv_data;
361         struct exynos_tmu_platform_data *pdata = data->pdata;
362         const struct exynos_tmu_registers *reg = pdata->registers;
363         unsigned int val;
364         int ret = -EINVAL;
365
366         if (!TMU_SUPPORTS(pdata, EMULATION))
367                 goto out;
368
369         if (temp && temp < MCELSIUS)
370                 goto out;
371
372         mutex_lock(&data->lock);
373         clk_enable(data->clk);
374
375         val = readl(data->base + reg->emul_con);
376
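        /*
         * A non-zero temp (in millidegrees Celsius) enables emulation at that
         * temperature; zero disables emulation so that real sensor readings
         * are reported again.
         */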
377         if (temp) {
378                 temp /= MCELSIUS;
379
380                 if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
381                         val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
382                         val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
383                 }
384                 val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
385                 val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
386                         EXYNOS_EMUL_ENABLE;
387         } else {
388                 val &= ~EXYNOS_EMUL_ENABLE;
389         }
390
391         writel(val, data->base + reg->emul_con);
392
393         clk_disable(data->clk);
394         mutex_unlock(&data->lock);
395         return 0;
396 out:
397         return ret;
398 }
399 #else
400 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
401         { return -EINVAL; }
402 #endif /* CONFIG_THERMAL_EMULATION */
403
404 static void exynos_tmu_work(struct work_struct *work)
405 {
406         struct exynos_tmu_data *data = container_of(work,
407                         struct exynos_tmu_data, irq_work);
408         struct exynos_tmu_platform_data *pdata = data->pdata;
409         const struct exynos_tmu_registers *reg = pdata->registers;
410         unsigned int val_irq, val_type;
411
412         if (!IS_ERR(data->clk_sec))
413                 clk_enable(data->clk_sec);
414         /* Find which sensor generated this interrupt */
415         if (reg->tmu_irqstatus) {
416                 val_type = readl(data->base_second + reg->tmu_irqstatus);
417                 if (!((val_type >> data->id) & 0x1))
418                         goto out;
419         }
420         if (!IS_ERR(data->clk_sec))
421                 clk_disable(data->clk_sec);
422
423         exynos_report_trigger(data->reg_conf);
424         mutex_lock(&data->lock);
425         clk_enable(data->clk);
426
427         /* TODO: take action based on particular interrupt */
428         val_irq = readl(data->base + reg->tmu_intstat);
429         /* clear the interrupts */
430         writel(val_irq, data->base + reg->tmu_intclear);
431
432         clk_disable(data->clk);
433         mutex_unlock(&data->lock);
434 out:
435         enable_irq(data->irq);
436 }
437
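/*
 * The hard IRQ handler below only masks the interrupt line and defers the
 * real handling to exynos_tmu_work(), which runs in process context and can
 * therefore take the mutex and enable the clocks.
 */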
438 static irqreturn_t exynos_tmu_irq(int irq, void *id)
439 {
440         struct exynos_tmu_data *data = id;
441
442         disable_irq_nosync(irq);
443         schedule_work(&data->irq_work);
444
445         return IRQ_HANDLED;
446 }
447
448 static const struct of_device_id exynos_tmu_match[] = {
449         {
450                 .compatible = "samsung,exynos3250-tmu",
451                 .data = (void *)EXYNOS3250_TMU_DRV_DATA,
452         },
453         {
454                 .compatible = "samsung,exynos4210-tmu",
455                 .data = (void *)EXYNOS4210_TMU_DRV_DATA,
456         },
457         {
458                 .compatible = "samsung,exynos4412-tmu",
459                 .data = (void *)EXYNOS4412_TMU_DRV_DATA,
460         },
461         {
462                 .compatible = "samsung,exynos5250-tmu",
463                 .data = (void *)EXYNOS5250_TMU_DRV_DATA,
464         },
465         {
466                 .compatible = "samsung,exynos5260-tmu",
467                 .data = (void *)EXYNOS5260_TMU_DRV_DATA,
468         },
469         {
470                 .compatible = "samsung,exynos5420-tmu",
471                 .data = (void *)EXYNOS5420_TMU_DRV_DATA,
472         },
473         {
474                 .compatible = "samsung,exynos5420-tmu-ext-triminfo",
475                 .data = (void *)EXYNOS5420_TMU_DRV_DATA,
476         },
477         {
478                 .compatible = "samsung,exynos5440-tmu",
479                 .data = (void *)EXYNOS5440_TMU_DRV_DATA,
480         },
481         {},
482 };
483 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
484
485 static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
486                         struct platform_device *pdev, int id)
487 {
488         struct exynos_tmu_init_data *data_table;
489         struct exynos_tmu_platform_data *tmu_data;
490         const struct of_device_id *match;
491
492         match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
493         if (!match)
494                 return NULL;
495         data_table = (struct exynos_tmu_init_data *) match->data;
496         if (!data_table || id >= data_table->tmu_count)
497                 return NULL;
498         tmu_data = data_table->tmu_data;
499         return (struct exynos_tmu_platform_data *) (tmu_data + id);
500 }
501
502 static int exynos_map_dt_data(struct platform_device *pdev)
503 {
504         struct exynos_tmu_data *data = platform_get_drvdata(pdev);
505         struct exynos_tmu_platform_data *pdata;
506         struct resource res;
507         int ret;
508
509         if (!data || !pdev->dev.of_node)
510                 return -ENODEV;
511
512         /*
513          * Try enabling the regulator if found
514          * TODO: Add the regulator as an SoC feature, so that enabling it
515          * becomes a mandatory step.
516          */
517         data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
518         if (!IS_ERR(data->regulator)) {
519                 ret = regulator_enable(data->regulator);
520                 if (ret) {
521                         dev_err(&pdev->dev, "failed to enable vtmu\n");
522                         return ret;
523                 }
524         } else {
525                 dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
526         }
527
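        /* The instance id comes from the "tmuctrl" DT alias; default to 0. */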
528         data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
529         if (data->id < 0)
530                 data->id = 0;
531
532         data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
533         if (data->irq <= 0) {
534                 dev_err(&pdev->dev, "failed to get IRQ\n");
535                 return -ENODEV;
536         }
537
538         if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
539                 dev_err(&pdev->dev, "failed to get Resource 0\n");
540                 return -ENODEV;
541         }
542
543         data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
544         if (!data->base) {
545                 dev_err(&pdev->dev, "Failed to ioremap memory\n");
546                 return -EADDRNOTAVAIL;
547         }
548
549         pdata = exynos_get_driver_data(pdev, data->id);
550         if (!pdata) {
551                 dev_err(&pdev->dev, "No platform init data supplied.\n");
552                 return -ENODEV;
553         }
554         data->pdata = pdata;
555         /*
556          * Check if the TMU shares some registers and then try to map the
557          * memory of common registers.
558          */
559         if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
560                 return 0;
561
562         if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
563                 dev_err(&pdev->dev, "failed to get Resource 1\n");
564                 return -ENODEV;
565         }
566
567         data->base_second = devm_ioremap(&pdev->dev, res.start,
568                                         resource_size(&res));
569         if (!data->base_second) {
570                 dev_err(&pdev->dev, "Failed to ioremap memory\n");
571                 return -ENOMEM;
572         }
573
574         return 0;
575 }
576
577 static int exynos_tmu_probe(struct platform_device *pdev)
578 {
579         struct exynos_tmu_data *data;
580         struct exynos_tmu_platform_data *pdata;
581         struct thermal_sensor_conf *sensor_conf;
582         int ret, i;
583
584         data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
585                                         GFP_KERNEL);
586         if (!data)
587                 return -ENOMEM;
588
589         platform_set_drvdata(pdev, data);
590         mutex_init(&data->lock);
591
592         ret = exynos_map_dt_data(pdev);
593         if (ret)
594                 return ret;
595
596         pdata = data->pdata;
597
598         INIT_WORK(&data->irq_work, exynos_tmu_work);
599
600         data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
601         if (IS_ERR(data->clk)) {
602                 dev_err(&pdev->dev, "Failed to get clock\n");
603                 return PTR_ERR(data->clk);
604         }
605
606         data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
607         if (IS_ERR(data->clk_sec)) {
608                 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
609                         dev_err(&pdev->dev, "Failed to get triminfo clock\n");
610                         return PTR_ERR(data->clk_sec);
611                 }
612         } else {
613                 ret = clk_prepare(data->clk_sec);
614                 if (ret) {
615                         dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
616                         return ret;
617                 }
618         }
619
620         ret = clk_prepare(data->clk);
621         if (ret) {
622                 dev_err(&pdev->dev, "Failed to prepare clock\n");
623                 goto err_clk_sec;
624         }
625
626         if (pdata->type == SOC_ARCH_EXYNOS3250 ||
627             pdata->type == SOC_ARCH_EXYNOS4210 ||
628             pdata->type == SOC_ARCH_EXYNOS4412 ||
629             pdata->type == SOC_ARCH_EXYNOS5250 ||
630             pdata->type == SOC_ARCH_EXYNOS5260 ||
631             pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
632             pdata->type == SOC_ARCH_EXYNOS5440)
633                 data->soc = pdata->type;
634         else {
635                 ret = -EINVAL;
636                 dev_err(&pdev->dev, "Platform not supported\n");
637                 goto err_clk;
638         }
639
640         ret = exynos_tmu_initialize(pdev);
641         if (ret) {
642                 dev_err(&pdev->dev, "Failed to initialize TMU\n");
643                 goto err_clk;
644         }
645
646         exynos_tmu_control(pdev, true);
647
648         /* Allocate a structure to register with the exynos core thermal */
649         sensor_conf = devm_kzalloc(&pdev->dev,
650                                 sizeof(struct thermal_sensor_conf), GFP_KERNEL);
651         if (!sensor_conf) {
652                 ret = -ENOMEM;
653                 goto err_clk;
654         }
655         sprintf(sensor_conf->name, "therm_zone%d", data->id);
656         sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
657         sensor_conf->write_emul_temp =
658                 (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
659         sensor_conf->driver_data = data;
660         sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
661                         pdata->trigger_enable[1] + pdata->trigger_enable[2] +
662                         pdata->trigger_enable[3];
663
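        /*
         * Trip temperatures handed to the thermal core are the base threshold
         * plus the per-level trigger value.
         */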
664         for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
665                 sensor_conf->trip_data.trip_val[i] =
666                         pdata->threshold + pdata->trigger_levels[i];
667                 sensor_conf->trip_data.trip_type[i] =
668                                         pdata->trigger_type[i];
669         }
670
671         sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
672
673         sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
674         for (i = 0; i < pdata->freq_tab_count; i++) {
675                 sensor_conf->cooling_data.freq_data[i].freq_clip_max =
676                                         pdata->freq_tab[i].freq_clip_max;
677                 sensor_conf->cooling_data.freq_data[i].temp_level =
678                                         pdata->freq_tab[i].temp_level;
679         }
680         sensor_conf->dev = &pdev->dev;
681         /* Register the sensor with thermal management interface */
682         ret = exynos_register_thermal(sensor_conf);
683         if (ret) {
684                 dev_err(&pdev->dev, "Failed to register thermal interface\n");
685                 goto err_clk;
686         }
687         data->reg_conf = sensor_conf;
688
689         ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
690                 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
691         if (ret) {
692                 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
693                 goto err_clk;
694         }
695
696         return 0;
697 err_clk:
698         clk_unprepare(data->clk);
699 err_clk_sec:
700         if (!IS_ERR(data->clk_sec))
701                 clk_unprepare(data->clk_sec);
702         return ret;
703 }
704
705 static int exynos_tmu_remove(struct platform_device *pdev)
706 {
707         struct exynos_tmu_data *data = platform_get_drvdata(pdev);
708
709         exynos_unregister_thermal(data->reg_conf);
710
711         exynos_tmu_control(pdev, false);
712
713         clk_unprepare(data->clk);
714         if (!IS_ERR(data->clk_sec))
715                 clk_unprepare(data->clk_sec);
716
717         if (!IS_ERR(data->regulator))
718                 regulator_disable(data->regulator);
719
720         return 0;
721 }
722
723 #ifdef CONFIG_PM_SLEEP
724 static int exynos_tmu_suspend(struct device *dev)
725 {
726         exynos_tmu_control(to_platform_device(dev), false);
727
728         return 0;
729 }
730
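/*
 * The TMU may have been powered down across suspend, so reprogram the
 * thresholds and re-enable the unit on resume.
 */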
731 static int exynos_tmu_resume(struct device *dev)
732 {
733         struct platform_device *pdev = to_platform_device(dev);
734
735         exynos_tmu_initialize(pdev);
736         exynos_tmu_control(pdev, true);
737
738         return 0;
739 }
740
741 static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
742                          exynos_tmu_suspend, exynos_tmu_resume);
743 #define EXYNOS_TMU_PM   (&exynos_tmu_pm)
744 #else
745 #define EXYNOS_TMU_PM   NULL
746 #endif
747
748 static struct platform_driver exynos_tmu_driver = {
749         .driver = {
750                 .name   = "exynos-tmu",
751                 .owner  = THIS_MODULE,
752                 .pm     = EXYNOS_TMU_PM,
753                 .of_match_table = exynos_tmu_match,
754         },
755         .probe = exynos_tmu_probe,
756         .remove = exynos_tmu_remove,
757 };
758
759 module_platform_driver(exynos_tmu_driver);
760
761 MODULE_DESCRIPTION("EXYNOS TMU Driver");
762 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
763 MODULE_LICENSE("GPL");
764 MODULE_ALIAS("platform:exynos-tmu");