scsi: ufs: make the UFS variant a platform device
drivers/scsi/ufs/ufs-qcom.c
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>

#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode);
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
        int err = 0;

        err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
                                __func__, err);

        return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
                const char *name, struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                dev_err(dev, "%s: failed to get %s err %d\n",
                                __func__, name, err);
        } else {
                *clk_out = clk;
        }

        return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
                const char *name, struct clk *clk)
{
        int err = 0;

        err = clk_prepare_enable(clk);
        if (err)
                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

        return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
        if (!host->is_lane_clks_enabled)
                return;

        clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
        clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);

        host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (host->is_lane_clks_enabled)
                return 0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                host->tx_l0_sync_clk);
        if (err)
                goto disable_rx_l0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                host->rx_l1_sync_clk);
        if (err)
                goto disable_tx_l0;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                host->tx_l1_sync_clk);
        if (err)
                goto disable_rx_l1;

        host->is_lane_clks_enabled = true;
        goto out;

disable_rx_l1:
        clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
        clk_disable_unprepare(host->rx_l0_sync_clk);
out:
        return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        err = ufs_qcom_host_clk_get(dev,
                        "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev,
                        "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                &host->rx_l1_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                &host->tx_l1_sync_clk);
out:
        return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        u32 tx_lanes;
        int err = 0;

        err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
        if (err)
                goto out;

        err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
                        __func__);

out:
        return err;
}

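/*
 * After link startup the UFS PHY is expected to have moved to HIBERN8.
 * Poll the M-PHY TX FSM state (the vendor-specific MPHY_TX_FSM_STATE
 * attribute) until it reports TX_FSM_HIBERN8 or the HBRN8_POLL_TOUT_MS
 * timeout expires.
 */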
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
        int err;
        u32 tx_fsm_val = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
                if (err || tx_fsm_val == TX_FSM_HIBERN8)
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * We might have been scheduled out for a long time during polling,
         * so check the state one more time.
         */
        if (time_after(jiffies, timeout))
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);

        if (err) {
                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
                err = tx_fsm_val;
                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
                                __func__, err);
        }

        return err;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
        bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);

        /* Assert PHY reset and apply PHY calibration values */
        ufs_qcom_assert_reset(hba);
        /* provide 1ms delay to let the reset pulse propagate */
        usleep_range(1000, 1100);

        ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
        if (ret) {
                dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        /* De-assert PHY reset and start serdes */
        ufs_qcom_deassert_reset(hba);

        /*
         * After reset de-assertion, the PHY needs its reference clocks,
         * voltage and current to settle down before the serdes is started.
         */
        usleep_range(1000, 1100);
        ret = ufs_qcom_phy_start_serdes(phy);
        if (ret) {
                dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        ret = ufs_qcom_phy_is_pcs_ready(phy);
        if (ret)
                dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
                        __func__, ret);

out:
        return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs;
 * a hardware CGC gates the clock of any UTP sub-module that is not involved
 * in the current operation. The UTP controller CGCs are disabled by default,
 * so this function enables them (after every UFS link startup) to reduce
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
        ufshcd_writel(hba,
                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
                REG_UFS_CFG2);

        /* Ensure that HW clock gating is enabled before next operations */
        mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_power_up_sequence(hba);
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after the PHY
                 * is initialized.
                 */
                err = ufs_qcom_enable_lane_clks(host);
                break;
        case POST_CHANGE:
                /* check if UFS PHY moved from DISABLED to HIBERN8 */
                err = ufs_qcom_check_hibern8(hba);
                ufs_qcom_enable_hw_clk_gating(hba);
                break;
        default:
                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
                err = -EINVAL;
                break;
        }
        return err;
}

/*
 * ufs_qcom_cfg_timers - program the controller timer registers.
 *
 * Returns the core_clk rate (non-zero) on success and 0 on failure.
 */
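/*
 * Worked example (illustrative rate, not a required value): with core_clk
 * running at 150 MHz, SYS1CLK_1US is programmed to 150000000 / 1000000 = 150
 * core clock cycles per microsecond, and CLK_NS_REG to the core clock period,
 * 1000000000 / 150000000 = 6 ns (integer division).
 */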
static unsigned long
ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
        u32 core_clk_period_in_ns;
        u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_us = 0;

        static u32 pwm_fr_table[][2] = {
                {UFS_PWM_G1, 0x1},
                {UFS_PWM_G2, 0x1},
                {UFS_PWM_G3, 0x1},
                {UFS_PWM_G4, 0x1},
        };

        static u32 hs_fr_table_rA[][2] = {
                {UFS_HS_G1, 0x1F},
                {UFS_HS_G2, 0x3e},
        };

        static u32 hs_fr_table_rB[][2] = {
                {UFS_HS_G1, 0x24},
                {UFS_HS_G2, 0x49},
        };

        /*
         * The Qunipro controller does not use the following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
         * UFS_REG_PA_LINK_STARTUP_TIMER.
         * However, the UTP controller uses the SYS1CLK_1US_REG register
         * for its interrupt aggregation logic.
         */
        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
                goto out;

        if (gear == 0) {
                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
                goto out_error;
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk"))
                        core_clk_rate = clk_get_rate(clki->clk);
        }

        /* If frequency is smaller than 1MHz, set to 1MHz */
        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
                core_clk_rate = DEFAULT_CLK_RATE_HZ;

        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);

        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
        core_clk_period_in_ns &= MASK_CLK_NS_REG;

        switch (hs) {
        case FASTAUTO_MODE:
        case FAST_MODE:
                if (rate == PA_HS_MODE_A) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rA));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
                } else if (rate == PA_HS_MODE_B) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rB));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
                } else {
                        dev_err(hba->dev, "%s: invalid rate = %d\n",
                                __func__, rate);
                        goto out_error;
                }
                break;
        case SLOWAUTO_MODE:
        case SLOW_MODE:
                if (gear > ARRAY_SIZE(pwm_fr_table)) {
                        dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(pwm_fr_table));
                        goto out_error;
                }
                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                break;
        case UNCHANGED:
        default:
                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
                goto out_error;
        }

        /* these two register fields must be written in a single access */
        ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
                                                REG_UFS_TX_SYMBOL_CLK_NS_US);
        goto out;

out_error:
        core_clk_rate = 0;
out:
        return core_clk_rate;
}

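/*
 * On PRE_CHANGE, program the PA link startup timer with the number of core
 * clock cycles in 100 ms, using the rate returned by ufs_qcom_cfg_timers().
 * For example (illustrative rate only), a 150 MHz core_clk gives
 * (150000000 / 1000) * 100 cycles.
 */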
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
{
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_100ms;

        switch (status) {
        case PRE_CHANGE:
                core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
                                                    SLOWAUTO_MODE, 0);
                if (!core_clk_rate) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        return -EINVAL;
                }
                core_clk_cycles_per_100ms =
                        (core_clk_rate / MSEC_PER_SEC) * 100;
                ufshcd_writel(hba, core_clk_cycles_per_100ms,
                                        REG_UFS_PA_LINK_STARTUP_TIMER);
                break;
        case POST_CHANGE:
                ufs_qcom_link_startup_post_change(hba);
                break;
        default:
                break;
        }

        return 0;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;

        if (ufs_qcom_is_link_off(hba)) {
                /*
                 * Disable the tx/rx lane symbol clocks before the PHY is
                 * powered down, as the PLL source should be disabled only
                 * after its downstream clocks are disabled.
                 */
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);

                /* Assert PHY soft reset */
                ufs_qcom_assert_reset(hba);
                goto out;
        }

        /*
         * If the UniPro link is not active, the PHY ref_clk, the main PHY
         * analog power rail and the low noise analog power rail for the PLL
         * can all be switched off.
         */
        if (!ufs_qcom_is_link_active(hba))
                phy_power_off(phy);

out:
        return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int err;

        err = phy_power_on(phy);
        if (err) {
                dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
                        __func__, err);
                goto out;
        }

        hba->is_sys_suspended = false;

out:
        return err;
}

struct ufs_qcom_dev_params {
        u32 pwm_rx_gear;        /* pwm rx gear to work in */
        u32 pwm_tx_gear;        /* pwm tx gear to work in */
        u32 hs_rx_gear;         /* hs rx gear to work in */
        u32 hs_tx_gear;         /* hs tx gear to work in */
        u32 rx_lanes;           /* number of rx lanes */
        u32 tx_lanes;           /* number of tx lanes */
        u32 rx_pwr_pwm;         /* rx pwm working pwr */
        u32 tx_pwr_pwm;         /* tx pwm working pwr */
        u32 rx_pwr_hs;          /* rx hs working pwr */
        u32 tx_pwr_hs;          /* tx hs working pwr */
        u32 hs_rate;            /* rate A/B to work in HS */
        u32 desired_working_mode;
};

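/*
 * Negotiate the power mode between the device capabilities (dev_max) and the
 * vendor-preferred limits (qcom_param): HS (FAST/FASTAUTO) is agreed on only
 * if both sides support it, and the agreed gear and lane counts are the
 * minima of the two sides. The result is written to agreed_pwr.
 */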
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
                                      struct ufs_pa_layer_attr *dev_max,
                                      struct ufs_pa_layer_attr *agreed_pwr)
{
        int min_qcom_gear;
        int min_dev_gear;
        bool is_dev_sup_hs = false;
        bool is_qcom_max_hs = false;

        if (dev_max->pwr_rx == FAST_MODE)
                is_dev_sup_hs = true;

        if (qcom_param->desired_working_mode == FAST) {
                is_qcom_max_hs = true;
                min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
                                      qcom_param->hs_tx_gear);
        } else {
                min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
                                      qcom_param->pwm_tx_gear);
        }

        /*
         * If the device doesn't support HS but
         * qcom_param->desired_working_mode is HS, the device and qcom_param
         * don't agree.
         */
        if (!is_dev_sup_hs && is_qcom_max_hs) {
                pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
                        __func__);
                return -ENOTSUPP;
        } else if (is_dev_sup_hs && is_qcom_max_hs) {
                /*
                 * Since the device supports HS, it supports FAST_MODE.
                 * Since qcom_param->desired_working_mode is also HS, the
                 * final decision (FAST/FASTAUTO) is made according to
                 * qcom_param, as it is the restricting factor.
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_hs;
        } else {
                /*
                 * Here qcom_param->desired_working_mode is PWM.
                 * It doesn't matter whether the device supports HS or PWM:
                 * in both cases qcom_param->desired_working_mode determines
                 * the mode.
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_pwm;
        }

        /*
         * We would like tx to work in the minimum number of lanes
         * between the device capability and the vendor preference.
         * The same decision is made for rx.
         */
        agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
                                                qcom_param->tx_lanes);
        agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
                                                qcom_param->rx_lanes);

        /* device maximum gear is the minimum between device rx and tx gears */
        min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

        /*
         * If the device capabilities and the vendor pre-defined preferences
         * are both HS or both PWM, the minimum gear of the two becomes the
         * chosen working gear.
         * If one is PWM and one is HS, the PWM side gets to decide the gear,
         * as it already decided above what pwr the device will be
         * configured to.
         */
        if ((is_dev_sup_hs && is_qcom_max_hs) ||
            (!is_dev_sup_hs && !is_qcom_max_hs))
                agreed_pwr->gear_rx = agreed_pwr->gear_tx =
                        min_t(u32, min_dev_gear, min_qcom_gear);
        else if (!is_dev_sup_hs)
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
        else
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

        agreed_pwr->hs_rate = qcom_param->hs_rate;
        return 0;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        int vote;
        int err = 0;
        char mode[BUS_VECTOR_NAME_LEN];

        ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

        vote = ufs_qcom_get_bus_vote(host, mode);
        if (vote >= 0)
                err = ufs_qcom_set_bus_vote(host, vote);
        else
                err = vote;

        if (err)
                dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
        else
                host->bus_vote.saved_vote = vote;
        return err;
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                bool status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        u32 val;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        struct ufs_qcom_dev_params ufs_qcom_cap;
        int ret = 0;
        int res = 0;

        if (!dev_req_params) {
                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
                ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
                ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
                ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
                ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
                ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
                ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
                ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
                ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
                ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
                ufs_qcom_cap.desired_working_mode =
                                        UFS_QCOM_LIMIT_DESIRED_MODE;

                ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
                                                 dev_max_params,
                                                 dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
                        goto out;
                }

                break;
        case POST_CHANGE:
                if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
                                        dev_req_params->hs_rate)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
                         * The error code is returned at the end of the
                         * routine, but we continue to configure
                         * UFS_PHY_TX_LANE_ENABLE and the bus voting as usual.
                         */
                        ret = -EINVAL;
                }

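                /*
                 * Build a bitmask with one bit set per active TX lane:
                 * e.g. lane_tx = 2 gives ~(MAX_U32 << 2) = 0x3.
                 */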
                val = ~(MAX_U32 << dev_req_params->lane_tx);
                res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
                if (res) {
                        dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
                                __func__, res);
                        ret = res;
                }

                /* cache the power mode parameters to use internally */
                memcpy(&host->dev_req_params,
                                dev_req_params, sizeof(*dev_req_params));
                ufs_qcom_update_bus_bw_vote(host);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x1)
                return UFSHCI_VERSION_11;
        else
                return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * A QCOM UFS host controller might have some non-standard behaviours
 * (quirks) relative to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x01) {
                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
        }

        if (host->hw_ver.major >= 0x2) {
                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

                if (!ufs_qcom_cap_qunipro(host))
                        /* Legacy UniPro mode still needs the following quirks */
                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
        }
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major >= 0x2)
                host->caps = UFS_QCOM_CAP_QUNIPRO;
}

static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode)
{
        struct device *dev = host->hba->dev;
        struct device_node *np = dev->of_node;
        int err;
        const char *key = "qcom,bus-vector-names";

        if (!speed_mode) {
                err = -EINVAL;
                goto out;
        }

        if (host->bus_vote.is_max_bw_needed && strcmp(speed_mode, "MIN"))
                err = of_property_match_string(np, key, "MAX");
        else
                err = of_property_match_string(np, key, speed_mode);

out:
        if (err < 0)
                dev_err(dev, "%s: Invalid %s mode %d\n",
                                __func__, speed_mode, err);
        return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        int err = 0;

        if (vote != host->bus_vote.curr_vote)
                host->bus_vote.curr_vote = vote;

        return err;
}

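/*
 * Map the current power mode to a bus vector name as listed under
 * "qcom,bus-vector-names" in the device tree: "MIN" when the power mode is
 * not yet initialized, "HS_R<A|B>_G<gear>_L<lanes>" for high speed (e.g.
 * "HS_RB_G3_L2"), and "PWM_G<gear>_L<lanes>" for PWM.
 */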
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
        int gear = max_t(u32, p->gear_rx, p->gear_tx);
        int lanes = max_t(u32, p->lane_rx, p->lane_tx);
        int pwr;

        /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
        if (!gear)
                gear = 1;

        if (!lanes)
                lanes = 1;

        if (!p->pwr_rx && !p->pwr_tx) {
                pwr = SLOWAUTO_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
        } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
                 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
                pwr = FAST_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
                         p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
        } else {
                pwr = SLOW_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
                         "PWM", gear, lanes);
        }
}

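/*
 * Called when the controller clocks are being turned on or off. On clock-on,
 * the PHY interface and reference clocks are enabled and the saved bus
 * bandwidth vote is restored; on clock-off, the interface clock is always
 * dropped, the reference clocks are dropped only while the link is inactive,
 * and the minimum bandwidth vote is applied.
 */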
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;
        int vote = 0;

        /*
         * In case ufs_qcom_init() is not yet done, simply ignore.
         * ufs_qcom_setup_clocks() will be called again from ufs_qcom_init()
         * once initialization is done.
         */
        if (!host)
                return 0;

        if (on) {
                err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
                if (err)
                        goto out;

                err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
                if (err) {
                        dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
                                __func__, err);
                        ufs_qcom_phy_disable_iface_clk(host->generic_phy);
                        goto out;
                }
                /* enable the device ref clock */
                ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
                vote = host->bus_vote.saved_vote;
                if (vote == host->bus_vote.min_bw_vote)
                        ufs_qcom_update_bus_bw_vote(host);
        } else {
                /* M-PHY RMMI interface clocks can be turned off */
                ufs_qcom_phy_disable_iface_clk(host->generic_phy);
                if (!ufs_qcom_is_link_active(hba)) {
                        /* turn off UFS local PHY ref_clk */
                        ufs_qcom_phy_disable_ref_clk(host->generic_phy);
                        /* disable device ref_clk */
                        ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
                }
                vote = host->bus_vote.min_bw_vote;
        }

        err = ufs_qcom_set_bus_vote(host, vote);
        if (err)
                dev_err(hba->dev, "%s: set bus vote failed %d\n",
                                __func__, err);

out:
        return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        uint32_t value;

        if (!kstrtou32(buf, 0, &value)) {
                host->bus_vote.is_max_bw_needed = !!value;
                ufs_qcom_update_bus_bw_vote(host);
        }

        return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        int err;
        struct device *dev = host->hba->dev;
        struct device_node *np = dev->of_node;

        err = of_property_count_strings(np, "qcom,bus-vector-names");
        if (err < 0) {
                dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
                                __func__, err);
                goto out;
        }

        /* cache the vote index for minimum and maximum bandwidth */
        host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
        host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

        host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
        host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
        sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
        host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
        host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
        err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
        return err;
}

#define ANDROID_BOOT_DEV_MAX    30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

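/*
 * Parse the "androidboot.bootdevice=" kernel command line argument so that
 * the driver can refuse to probe on a device other than the declared boot
 * device (see the check at the top of ufs_qcom_init()). __setup() handlers
 * are only available in built-in code, hence the #ifndef MODULE guard.
 */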
#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
        return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling its
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power up failure and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
        int err;
        struct device *dev = hba->dev;
        struct ufs_qcom_host *host;

        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
                return -ENODEV;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        host->generic_phy = devm_phy_get(dev, "ufsphy");

        if (IS_ERR(host->generic_phy)) {
                err = PTR_ERR(host->generic_phy);
                dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                goto out;
        }

        err = ufs_qcom_bus_register(host);
        if (err)
                goto out_host_free;

        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);

        /* update phy revision information before calling phy_init() */
        ufs_qcom_phy_save_controller_version(host->generic_phy,
                host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

        phy_init(host->generic_phy);
        err = phy_power_on(host->generic_phy);
        if (err)
                goto out_unregister_bus;

        err = ufs_qcom_init_lane_clks(host);
        if (err)
                goto out_disable_phy;

        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);

        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

        ufs_qcom_setup_clocks(hba, true);

        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
                ufs_qcom_hosts[hba->dev->id] = host;

        goto out;

out_disable_phy:
        phy_power_off(host->generic_phy);
out_unregister_bus:
        phy_exit(host->generic_phy);
out_host_free:
        devm_kfree(dev, host);
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        ufs_qcom_disable_lane_clks(host);
        phy_power_off(host->generic_phy);
}

static void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;

        if (!dev_req_params)
                return;

        ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                dev_req_params->pwr_rx,
                                dev_req_params->hs_rate);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .name                   = "qcom",
        .init                   = ufs_qcom_init,
        .exit                   = ufs_qcom_exit,
        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
        .setup_clocks           = ufs_qcom_setup_clocks,
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* Perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
        if (err)
                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

        return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
        { .compatible = "qcom,ufshc"},
        {},
};

static const struct dev_pm_ops ufs_qcom_pm_ops = {
        .suspend        = ufshcd_pltfrm_suspend,
        .resume         = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
        .probe  = ufs_qcom_probe,
        .remove = ufs_qcom_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-qcom",
                .pm     = &ufs_qcom_pm_ops,
                .of_match_table = of_match_ptr(ufs_qcom_of_match),
        },
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");