/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24 #include <linux/clk.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
29 #include <linux/of_address.h>
30 #include <linux/of_irq.h>
31 #include <linux/platform_device.h>
32 #include <linux/regulator/consumer.h>
34 #include "exynos_thermal_common.h"
35 #include "exynos_tmu.h"
36 #include "exynos_tmu_data.h"
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
/* Private per-instance state of one TMU controller. */
56 struct exynos_tmu_data {
/* Per-SoC platform/configuration data selected from the OF match table */
58 struct exynos_tmu_platform_data *pdata;
/* MMIO base of the shared/common register block (used when the SoC
 * supports ADDRESS_MULTIPLE); accesses are gated by clk_sec */
60 void __iomem *base_second;
/* Deferred (bottom-half) interrupt handling; see exynos_tmu_work() */
63 struct work_struct irq_work;
/* Bus clock for the TMU registers, plus the optional triminfo clock */
65 struct clk *clk, *clk_sec;
/* First/second-point trim codes read from TRIMINFO during initialize */
66 u8 temp_error1, temp_error2;
/* Optional "vtmu" supply, enabled in exynos_map_dt_data() */
67 struct regulator *regulator;
/* Sensor configuration registered with the exynos thermal core */
68 struct thermal_sensor_conf *reg_conf;
/* NOTE(review): members id, base, irq, soc, lock and the closing brace
 * are documented above but absent from this extract — lines appear
 * truncated; verify against the full source. */
/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
/*
 * temp_to_code() - convert a temperature in degree Celsius into the raw
 * register code expected by the TMU, compensating with the fused trim
 * values according to the configured calibration type.
 * NOTE(review): the local declaration, 'break' statements, 'default:'
 * label and return appear truncated from this extract.
 */
75 static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
77 struct exynos_tmu_platform_data *pdata = data->pdata;
/* Calibration type selects the temp -> code mapping */
80 switch (pdata->cal_type) {
81 case TYPE_TWO_POINT_TRIMMING:
/* Linear interpolation between the two fused trim points
 * (expression continues with an operand missing in this extract) */
82 temp_code = (temp - pdata->first_point_trim) *
83 (data->temp_error2 - data->temp_error1) /
84 (pdata->second_point_trim - pdata->first_point_trim) +
87 case TYPE_ONE_POINT_TRIMMING:
/* Single-point calibration: constant offset derived from the trim fuse */
88 temp_code = temp + data->temp_error1 - pdata->first_point_trim;
/* No calibration: fall back to the platform's default offset */
91 temp_code = temp + pdata->default_temp_offset;
/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
/*
 * code_to_temp() - inverse of temp_to_code(): convert a raw TMU register
 * code back into a temperature in degree Celsius using the same
 * calibration-type-dependent mapping.
 * NOTE(review): local declaration, 'break's, 'default:' and return appear
 * truncated from this extract.
 */
102 static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
104 struct exynos_tmu_platform_data *pdata = data->pdata;
107 switch (pdata->cal_type) {
108 case TYPE_TWO_POINT_TRIMMING:
/* Linear interpolation between the two fused trim points */
109 temp = (temp_code - data->temp_error1) *
110 (pdata->second_point_trim - pdata->first_point_trim) /
111 (data->temp_error2 - data->temp_error1) +
112 pdata->first_point_trim;
114 case TYPE_ONE_POINT_TRIMMING:
/* Single-point calibration: constant offset from the trim fuse */
115 temp = temp_code - data->temp_error1 + pdata->first_point_trim;
/* No calibration: undo the platform's default offset */
118 temp = temp_code - pdata->default_temp_offset;
/*
 * exynos_tmu_initialize() - one-time hardware setup for a TMU instance:
 * reads the fused trim (calibration) values, programs the rising/falling
 * threshold registers and clears pending interrupt status.
 * Runs under data->lock with the TMU clock(s) enabled.
 * Returns 0 on success, negative errno otherwise (ret path partially
 * truncated in this extract).
 */
125 static int exynos_tmu_initialize(struct platform_device *pdev)
127 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
128 struct exynos_tmu_platform_data *pdata = data->pdata;
129 const struct exynos_tmu_registers *reg = pdata->registers;
130 unsigned int status, trim_info = 0, con;
131 unsigned int rising_threshold = 0, falling_threshold = 0;
132 int ret = 0, threshold_code, i;
134 mutex_lock(&data->lock);
135 clk_enable(data->clk);
/* clk_sec may legitimately be an error pointer on SoCs without it */
136 if (!IS_ERR(data->clk_sec))
137 clk_enable(data->clk_sec);
/* Optionally check the controller reports ready before programming it */
139 if (TMU_SUPPORTS(pdata, READY_STATUS)) {
140 status = readb(data->base + reg->tmu_status);
/* Optionally force a reload of the trim fuses into TRIMINFO */
147 if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
148 __raw_writel(1, data->base + reg->triminfo_ctrl);
149 /* Save trimming info in order to perform calibration */
150 if (data->soc == SOC_ARCH_EXYNOS5440) {
152 * For exynos5440 soc triminfo value is swapped between TMU0 and
153 * TMU2, so the below logic is needed.
158 trim_info = readl(data->base +
159 EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
162 trim_info = readl(data->base + reg->triminfo_data);
165 trim_info = readl(data->base -
166 EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
169 /* On exynos5420 the triminfo register is in the shared space */
170 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
171 trim_info = readl(data->base_second +
174 trim_info = readl(data->base + reg->triminfo_data);
/* Extract the two trim codes packed into the TRIMINFO word */
176 data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
177 data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
178 EXYNOS_TMU_TEMP_MASK);
/* Fall back to the platform's default efuse value when the fused
 * first-point trim is absent or out of the sane range */
180 if (!data->temp_error1 ||
181 (pdata->min_efuse_value > data->temp_error1) ||
182 (data->temp_error1 > pdata->max_efuse_value))
183 data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
185 if (!data->temp_error2)
187 (pdata->efuse_value >> reg->triminfo_85_shift) &
188 EXYNOS_TMU_TEMP_MASK;
/* Start from the current th0 contents so untouched bytes survive */
190 rising_threshold = readl(data->base + reg->threshold_th0);
/* exynos4210 uses one byte-wide threshold register per trigger level */
192 if (data->soc == SOC_ARCH_EXYNOS4210) {
193 /* Write temperature code for threshold */
194 threshold_code = temp_to_code(data, pdata->threshold);
195 writeb(threshold_code,
196 data->base + reg->threshold_temp);
197 for (i = 0; i < pdata->non_hw_trigger_levels; i++)
198 writeb(pdata->trigger_levels[i], data->base +
199 reg->threshold_th0 + i * sizeof(reg->threshold_th0));
/* Clear any stale rising-interrupt status */
201 writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
202 /* Write temperature code for rising and falling threshold */
204 for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
205 threshold_code = temp_to_code(data,
206 pdata->trigger_levels[i]);
/* One 8-bit code per level packed into the 32-bit th0 register */
207 rising_threshold &= ~(0xff << 8 * i);
208 rising_threshold |= threshold_code << 8 * i;
209 if (pdata->threshold_falling) {
/* Falling threshold = trigger level minus the hysteresis */
210 threshold_code = temp_to_code(data,
211 pdata->trigger_levels[i] -
212 pdata->threshold_falling);
213 falling_threshold |= threshold_code << 8 * i;
217 writel(rising_threshold,
218 data->base + reg->threshold_th0);
219 writel(falling_threshold,
220 data->base + reg->threshold_th1);
/* Clear stale rising and falling interrupt status in one write */
222 writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
223 (reg->intclr_fall_mask << reg->intclr_fall_shift),
224 data->base + reg->tmu_intclear);
225 /* if last threshold limit is also present */
227 i = pdata->max_trigger_level - 1;
/* Program the hardware thermal-trip (HW_TRIP) level, if configured */
228 if (pdata->trigger_levels[i] &&
229 (pdata->trigger_type[i] == HW_TRIP)) {
230 threshold_code = temp_to_code(data,
231 pdata->trigger_levels[i]);
232 if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
233 /* 1-4 level to be assigned in th0 reg */
234 rising_threshold &= ~(0xff << 8 * i);
235 rising_threshold |= threshold_code << 8 * i;
236 writel(rising_threshold,
237 data->base + reg->threshold_th0);
238 } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
239 /* 5th level to be assigned in th2 reg */
241 threshold_code << reg->threshold_th3_l0_shift;
242 writel(rising_threshold,
243 data->base + reg->threshold_th2);
/* Enable the hardware thermal-trip output */
245 con = readl(data->base + reg->tmu_ctrl);
246 con |= (1 << reg->therm_trip_en_shift);
247 writel(con, data->base + reg->tmu_ctrl);
249 /*Clear the PMIN in the common TMU register*/
/* Only the first instance (id 0) touches the shared PMIN register */
251 if (reg->tmu_pmin && !data->id)
252 writel(0, data->base_second + reg->tmu_pmin);
254 clk_disable(data->clk);
255 mutex_unlock(&data->lock);
256 if (!IS_ERR(data->clk_sec))
257 clk_disable(data->clk_sec);
/*
 * exynos_tmu_control() - enable or disable the TMU core and its interrupt
 * sources. @on == true programs the analog front-end (reference voltage,
 * gain, noise-cancel mode), sets the core-enable bit and unmasks the
 * configured rising (and optionally falling) trip interrupts; @on == false
 * clears the core-enable bit and masks all interrupts.
 * Runs under data->lock with the TMU clock enabled.
 * NOTE(review): the if/else structure around @on and some assignments to
 * interrupt_en appear truncated in this extract.
 */
262 static void exynos_tmu_control(struct platform_device *pdev, bool on)
264 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
265 struct exynos_tmu_platform_data *pdata = data->pdata;
266 const struct exynos_tmu_registers *reg = pdata->registers;
267 unsigned int con, interrupt_en;
269 mutex_lock(&data->lock);
270 clk_enable(data->clk);
/* Read-modify-write of the main control register */
272 con = readl(data->base + reg->tmu_ctrl);
275 con |= (pdata->test_mux << reg->test_mux_addr_shift);
/* Program the buffer reference voltage when the platform specifies one */
277 if (pdata->reference_voltage) {
278 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
279 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
/* Program the sensing gain (buffer slope select) */
283 con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
284 con |= (pdata->gain << reg->buf_slope_sel_shift);
/* Optional noise-cancellation / trip mode */
287 if (pdata->noise_cancel_mode) {
288 con &= ~(reg->therm_trip_mode_mask <<
289 reg->therm_trip_mode_shift);
290 con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
/* Enable the TMU core and the configured rising-trip interrupts */
294 con |= (1 << reg->core_en_shift);
296 pdata->trigger_enable[3] << reg->inten_rise3_shift |
297 pdata->trigger_enable[2] << reg->inten_rise2_shift |
298 pdata->trigger_enable[1] << reg->inten_rise1_shift |
299 pdata->trigger_enable[0] << reg->inten_rise0_shift;
/* Mirror the rising mask onto the falling-trip bits when supported */
300 if (TMU_SUPPORTS(pdata, FALLING_TRIP))
302 interrupt_en << reg->inten_fall0_shift;
/* Disable path: clear core-enable and mask everything */
304 con &= ~(1 << reg->core_en_shift);
305 interrupt_en = 0; /* Disable all interrupts */
307 writel(interrupt_en, data->base + reg->tmu_inten);
308 writel(con, data->base + reg->tmu_ctrl);
310 clk_disable(data->clk);
311 mutex_unlock(&data->lock);
/*
 * exynos_tmu_read() - read the current temperature register and convert
 * the raw code to degree Celsius via code_to_temp().
 * Runs under data->lock with the TMU clock enabled. Registered with the
 * thermal core as the read_temperature callback (see probe).
 * NOTE(review): local declarations, the 4210 range-check error path and
 * the return statement appear truncated in this extract.
 */
314 static int exynos_tmu_read(struct exynos_tmu_data *data)
316 struct exynos_tmu_platform_data *pdata = data->pdata;
317 const struct exynos_tmu_registers *reg = pdata->registers;
321 mutex_lock(&data->lock);
322 clk_enable(data->clk);
/* Current temperature code is a single byte register */
324 temp_code = readb(data->base + reg->tmu_cur_temp);
326 if (data->soc == SOC_ARCH_EXYNOS4210)
327 /* temp_code should range between 75 and 175 */
328 if (temp_code < 75 || temp_code > 175) {
333 temp = code_to_temp(data, temp_code);
335 clk_disable(data->clk);
336 mutex_unlock(&data->lock);
/*
 * exynos_tmu_set_emulation() - force the TMU to report an emulated
 * temperature (in millicelsius) via the EMUL_CON register; temp == 0
 * disables emulation. Only available when the SoC supports EMULATION
 * and CONFIG_THERMAL_EMULATION is set; otherwise the stub below applies.
 * NOTE(review): early-return error paths, enable-bit OR term and the
 * stub body appear truncated in this extract.
 */
341 #ifdef CONFIG_THERMAL_EMULATION
342 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
344 struct exynos_tmu_data *data = drv_data;
345 struct exynos_tmu_platform_data *pdata = data->pdata;
346 const struct exynos_tmu_registers *reg = pdata->registers;
/* Bail out if the hardware cannot emulate temperatures */
350 if (!TMU_SUPPORTS(pdata, EMULATION))
/* Non-zero temperatures below 1 degree C (MCELSIUS) are rejected */
353 if (temp && temp < MCELSIUS)
356 mutex_lock(&data->lock);
357 clk_enable(data->clk);
359 val = readl(data->base + reg->emul_con);
/* Optionally program the emulation next-time field */
364 if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
365 val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
366 val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
/* Insert the emulated temperature code */
368 val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
369 val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
/* temp == 0: turn emulation off */
372 val &= ~EXYNOS_EMUL_ENABLE;
375 writel(val, data->base + reg->emul_con);
377 clk_disable(data->clk);
378 mutex_unlock(&data->lock);
/* Stub used when CONFIG_THERMAL_EMULATION is not enabled */
384 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
386 #endif/*CONFIG_THERMAL_EMULATION*/
/*
 * exynos_tmu_work() - bottom half of the TMU interrupt. Confirms (on SoCs
 * with a shared IRQ-status register) that this instance raised the
 * interrupt, notifies the thermal core, clears the interrupt status and
 * finally re-enables the IRQ line masked by exynos_tmu_irq().
 * NOTE(review): the early-out path for "not our interrupt" appears
 * truncated in this extract.
 */
388 static void exynos_tmu_work(struct work_struct *work)
390 struct exynos_tmu_data *data = container_of(work,
391 struct exynos_tmu_data, irq_work);
392 struct exynos_tmu_platform_data *pdata = data->pdata;
393 const struct exynos_tmu_registers *reg = pdata->registers;
394 unsigned int val_irq, val_type;
/* base_second accesses need clk_sec where it exists */
396 if (!IS_ERR(data->clk_sec))
397 clk_enable(data->clk_sec);
398 /* Find which sensor generated this interrupt */
399 if (reg->tmu_irqstatus) {
400 val_type = readl(data->base_second + reg->tmu_irqstatus);
/* Bit data->id set means this instance triggered */
401 if (!((val_type >> data->id) & 0x1))
404 if (!IS_ERR(data->clk_sec))
405 clk_disable(data->clk_sec);
/* Let the exynos thermal core re-evaluate trip points */
407 exynos_report_trigger(data->reg_conf);
408 mutex_lock(&data->lock);
409 clk_enable(data->clk);
411 /* TODO: take action based on particular interrupt */
412 val_irq = readl(data->base + reg->tmu_intstat);
413 /* clear the interrupts */
414 writel(val_irq, data->base + reg->tmu_intclear);
416 clk_disable(data->clk);
417 mutex_unlock(&data->lock);
/* Re-arm the line disabled in the top half */
419 enable_irq(data->irq);
/*
 * exynos_tmu_irq() - top-half IRQ handler: mask the line and defer all
 * work (register access needs a mutex/clock) to exynos_tmu_work().
 * NOTE(review): the IRQ_HANDLED return appears truncated in this extract.
 */
422 static irqreturn_t exynos_tmu_irq(int irq, void *id)
424 struct exynos_tmu_data *data = id;
/* Keep the line masked until the work item re-enables it */
426 disable_irq_nosync(irq);
427 schedule_work(&data->irq_work);
/*
 * Device-tree match table mapping each supported SoC compatible string to
 * its per-SoC init-data table (cast back in exynos_get_driver_data()).
 * Note both exynos5420 compatibles share EXYNOS5420_TMU_DRV_DATA.
 */
432 static const struct of_device_id exynos_tmu_match[] = {
434 .compatible = "samsung,exynos3250-tmu",
435 .data = (void *)EXYNOS3250_TMU_DRV_DATA,
438 .compatible = "samsung,exynos4210-tmu",
439 .data = (void *)EXYNOS4210_TMU_DRV_DATA,
442 .compatible = "samsung,exynos4412-tmu",
443 .data = (void *)EXYNOS4412_TMU_DRV_DATA,
446 .compatible = "samsung,exynos5250-tmu",
447 .data = (void *)EXYNOS5250_TMU_DRV_DATA,
450 .compatible = "samsung,exynos5260-tmu",
451 .data = (void *)EXYNOS5260_TMU_DRV_DATA,
454 .compatible = "samsung,exynos5420-tmu",
455 .data = (void *)EXYNOS5420_TMU_DRV_DATA,
458 .compatible = "samsung,exynos5420-tmu-ext-triminfo",
459 .data = (void *)EXYNOS5420_TMU_DRV_DATA,
462 .compatible = "samsung,exynos5440-tmu",
463 .data = (void *)EXYNOS5440_TMU_DRV_DATA,
467 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
/*
 * exynos_get_driver_data() - look up the per-instance platform data for
 * this device: match the OF node against exynos_tmu_match, then index the
 * matched init-data table by the controller @id.
 * Returns a pointer into the static init-data table, or (per the bounds
 * check below) an error/NULL path whose lines appear truncated here.
 */
469 static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
470 struct platform_device *pdev, int id)
472 struct exynos_tmu_init_data *data_table;
473 struct exynos_tmu_platform_data *tmu_data;
474 const struct of_device_id *match;
476 match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
/* match->data was stored as the SoC's init-data table in the match table */
479 data_table = (struct exynos_tmu_init_data *) match->data;
/* Guard against an id beyond the number of TMU instances on this SoC */
480 if (!data_table || id >= data_table->tmu_count)
482 tmu_data = data_table->tmu_data;
483 return (struct exynos_tmu_platform_data *) (tmu_data + id);
/*
 * exynos_map_dt_data() - gather everything this instance needs from the
 * device tree: optional "vtmu" regulator, "tmuctrl" alias id, IRQ,
 * primary MMIO region, per-SoC platform data, and (when the SoC shares
 * registers) the secondary MMIO region.
 * Returns 0 on success, negative errno otherwise (several return lines
 * appear truncated in this extract).
 */
486 static int exynos_map_dt_data(struct platform_device *pdev)
488 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
489 struct exynos_tmu_platform_data *pdata;
493 if (!data || !pdev->dev.of_node)
496 * Try enabling the regulator if found
497 * TODO: Add regulator as an SOC feature, so that regulator enable
498 * is a compulsory call.
/* The regulator is optional: failure to find it is only informational */
501 data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
502 if (!IS_ERR(data->regulator)) {
503 ret = regulator_enable(data->regulator);
505 dev_err(&pdev->dev, "failed to enable vtmu\n");
509 dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
/* Instance id comes from the "tmuctrl" DT alias */
512 data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
516 data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
517 if (data->irq <= 0) {
518 dev_err(&pdev->dev, "failed to get IRQ\n");
/* Map the primary (per-instance) register region */
522 if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
523 dev_err(&pdev->dev, "failed to get Resource 0\n");
527 data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
529 dev_err(&pdev->dev, "Failed to ioremap memory\n");
530 return -EADDRNOTAVAIL;
533 pdata = exynos_get_driver_data(pdev, data->id);
535 dev_err(&pdev->dev, "No platform init data supplied.\n");
539 * Check if the TMU shares some registers and then try to map the
540 * memory of common registers.
/* SoCs without shared registers are done at this point */
543 if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
546 if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
547 dev_err(&pdev->dev, "failed to get Resource 1\n");
551 data->base_second = devm_ioremap(&pdev->dev, res.start,
552 resource_size(&res));
553 if (!data->base_second) {
554 dev_err(&pdev->dev, "Failed to ioremap memory\n");
/*
 * exynos_tmu_probe() - driver probe: allocate per-instance state, parse
 * DT data, acquire and prepare clocks, validate the SoC type, initialize
 * and enable the hardware, then build a thermal_sensor_conf (trip points
 * and cooling/frequency table from pdata) and register it with the
 * exynos thermal core before requesting the IRQ.
 * NOTE(review): several error-path 'goto' statements and labels appear
 * truncated in this extract; the trailing clk_unprepare calls are the
 * error-cleanup tail.
 */
561 static int exynos_tmu_probe(struct platform_device *pdev)
563 struct exynos_tmu_data *data;
564 struct exynos_tmu_platform_data *pdata;
565 struct thermal_sensor_conf *sensor_conf;
568 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
573 platform_set_drvdata(pdev, data);
574 mutex_init(&data->lock);
576 ret = exynos_map_dt_data(pdev);
582 INIT_WORK(&data->irq_work, exynos_tmu_work);
/* Bus clock for the per-instance register block */
584 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
585 if (IS_ERR(data->clk)) {
586 dev_err(&pdev->dev, "Failed to get clock\n");
587 return PTR_ERR(data->clk);
/* The triminfo clock is mandatory only on exynos5420 ext-triminfo */
590 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
591 if (IS_ERR(data->clk_sec)) {
592 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
593 dev_err(&pdev->dev, "Failed to get triminfo clock\n");
594 return PTR_ERR(data->clk_sec);
597 ret = clk_prepare(data->clk_sec);
599 dev_err(&pdev->dev, "Failed to get clock\n");
604 ret = clk_prepare(data->clk);
606 dev_err(&pdev->dev, "Failed to get clock\n");
/* Accept only the SoC types this driver knows how to program */
610 if (pdata->type == SOC_ARCH_EXYNOS3250 ||
611 pdata->type == SOC_ARCH_EXYNOS4210 ||
612 pdata->type == SOC_ARCH_EXYNOS4412 ||
613 pdata->type == SOC_ARCH_EXYNOS5250 ||
614 pdata->type == SOC_ARCH_EXYNOS5260 ||
615 pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
616 pdata->type == SOC_ARCH_EXYNOS5440)
617 data->soc = pdata->type;
620 dev_err(&pdev->dev, "Platform not supported\n");
624 ret = exynos_tmu_initialize(pdev);
626 dev_err(&pdev->dev, "Failed to initialize TMU\n");
630 exynos_tmu_control(pdev, true);
632 /* Allocate a structure to register with the exynos core thermal */
633 sensor_conf = devm_kzalloc(&pdev->dev,
634 sizeof(struct thermal_sensor_conf), GFP_KERNEL);
639 sprintf(sensor_conf->name, "therm_zone%d", data->id);
640 sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
641 sensor_conf->write_emul_temp =
642 (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
643 sensor_conf->driver_data = data;
/* trip_count = number of enabled trigger levels (enables are 0/1) */
644 sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
645 pdata->trigger_enable[1] + pdata->trigger_enable[2]+
646 pdata->trigger_enable[3];
648 for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
649 sensor_conf->trip_data.trip_val[i] =
650 pdata->threshold + pdata->trigger_levels[i];
651 sensor_conf->trip_data.trip_type[i] =
652 pdata->trigger_type[i];
655 sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
/* Copy the cpufreq cooling (frequency clip) table from pdata */
657 sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
658 for (i = 0; i < pdata->freq_tab_count; i++) {
659 sensor_conf->cooling_data.freq_data[i].freq_clip_max =
660 pdata->freq_tab[i].freq_clip_max;
661 sensor_conf->cooling_data.freq_data[i].temp_level =
662 pdata->freq_tab[i].temp_level;
664 sensor_conf->dev = &pdev->dev;
665 /* Register the sensor with thermal management interface */
666 ret = exynos_register_thermal(sensor_conf);
668 dev_err(&pdev->dev, "Failed to register thermal interface\n");
671 data->reg_conf = sensor_conf;
/* IRQ requested last, once the sensor is fully registered */
673 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
674 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
676 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
/* Error-cleanup tail: undo the clk_prepare calls above */
682 clk_unprepare(data->clk);
684 if (!IS_ERR(data->clk_sec))
685 clk_unprepare(data->clk_sec);
/*
 * exynos_tmu_remove() - driver removal: unwind probe in reverse order —
 * unregister from the thermal core, disable the hardware, unprepare the
 * clocks and release the optional regulator.
 */
689 static int exynos_tmu_remove(struct platform_device *pdev)
691 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
693 exynos_unregister_thermal(data->reg_conf);
695 exynos_tmu_control(pdev, false);
697 clk_unprepare(data->clk);
698 if (!IS_ERR(data->clk_sec))
699 clk_unprepare(data->clk_sec);
/* Regulator was optional at probe; only disable it if it was acquired */
701 if (!IS_ERR(data->regulator))
702 regulator_disable(data->regulator);
/*
 * System sleep support: suspend simply disables the TMU; resume must
 * re-run the full initialization (trim values and thresholds are lost
 * across suspend) before re-enabling it. Without CONFIG_PM_SLEEP the
 * driver advertises no PM ops (EXYNOS_TMU_PM == NULL).
 */
707 #ifdef CONFIG_PM_SLEEP
708 static int exynos_tmu_suspend(struct device *dev)
710 exynos_tmu_control(to_platform_device(dev), false);
715 static int exynos_tmu_resume(struct device *dev)
717 struct platform_device *pdev = to_platform_device(dev);
/* Re-program thresholds/trim before turning the core back on */
719 exynos_tmu_initialize(pdev);
720 exynos_tmu_control(pdev, true);
725 static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
726 exynos_tmu_suspend, exynos_tmu_resume);
727 #define EXYNOS_TMU_PM (&exynos_tmu_pm)
/* !CONFIG_PM_SLEEP fallback */
729 #define EXYNOS_TMU_PM NULL
/* Platform-driver glue: binds via the OF match table above. */
732 static struct platform_driver exynos_tmu_driver = {
734 .name = "exynos-tmu",
735 .owner = THIS_MODULE,
737 .of_match_table = exynos_tmu_match,
739 .probe = exynos_tmu_probe,
740 .remove = exynos_tmu_remove,
/* Registers the driver and provides module init/exit boilerplate */
743 module_platform_driver(exynos_tmu_driver);
745 MODULE_DESCRIPTION("EXYNOS TMU Driver");
746 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
747 MODULE_LICENSE("GPL");
748 MODULE_ALIAS("platform:exynos-tmu");