1 /*
2  *  linux/drivers/mmc/core/mmc.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/err.h>
14 #include <linux/slab.h>
15 #include <linux/stat.h>
16 #include <linux/pm_runtime.h>
17
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/card.h>
20 #include <linux/mmc/mmc.h>
21
22 #include "core.h"
23 #include "bus.h"
24 #include "mmc_ops.h"
25 #include "sd_ops.h"
26
27 static const unsigned int tran_exp[] = {
28         10000,          100000,         1000000,        10000000,
29         0,              0,              0,              0
30 };
31
32 static const unsigned char tran_mant[] = {
33         0,      10,     12,     13,     15,     20,     25,     30,
34         35,     40,     45,     50,     55,     60,     70,     80,
35 };
36
37 static const unsigned int tacc_exp[] = {
38         1,      10,     100,    1000,   10000,  100000, 1000000, 10000000,
39 };
40
41 static const unsigned int tacc_mant[] = {
42         0,      10,     12,     13,     15,     20,     25,     30,
43         35,     40,     45,     50,     55,     60,     70,     80,
44 };
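/*
 * The CSD encodes TRAN_SPEED and TAAC as a 3-bit exponent plus a 4-bit
 * mantissa.  The tables above store the mantissa scaled by ten, with the
 * TRAN_SPEED exponent unit pre-divided by ten, so tran_exp[e] * tran_mant[m]
 * comes out directly in Hz (for illustration, e = 2 and m = 6 give
 * 1000000 * 25 = 25 MHz); the TAAC product is divided by ten (rounding up)
 * in mmc_decode_csd() to yield nanoseconds.
 */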
45
46 #define UNSTUFF_BITS(resp,start,size)                                   \
47         ({                                                              \
48                 const int __size = size;                                \
49                 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
50                 const int __off = 3 - ((start) / 32);                   \
51                 const int __shft = (start) & 31;                        \
52                 u32 __res;                                              \
53                                                                         \
54                 __res = resp[__off] >> __shft;                          \
55                 if (__size + __shft > 32)                               \
56                         __res |= resp[__off-1] << ((32 - __shft) % 32); \
57                 __res & __mask;                                         \
58         })
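/*
 * For illustration: UNSTUFF_BITS(resp, 120, 8) computes __off = 3 - 120/32 = 0
 * and __shft = 120 & 31 = 24, so it evaluates to (resp[0] >> 24) & 0xff, i.e.
 * the most significant byte of the first response word.  The bit positions
 * used below are therefore the CSD/CID register bit numbers, counted from
 * bit 0 in the last response word.
 */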
59
60 /*
61  * Given the decoded CSD structure, decode the raw CID to our CID structure.
62  */
63 static int mmc_decode_cid(struct mmc_card *card)
64 {
65         u32 *resp = card->raw_cid;
66
67         /*
68          * The selection of the format here is based upon published
69          * specs from SanDisk and from what people have reported.
70          */
71         switch (card->csd.mmca_vsn) {
72         case 0: /* MMC v1.0 - v1.2 */
73         case 1: /* MMC v1.4 */
74                 card->cid.manfid        = UNSTUFF_BITS(resp, 104, 24);
75                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
76                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
77                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
78                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
79                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
80                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
81                 card->cid.prod_name[6]  = UNSTUFF_BITS(resp, 48, 8);
82                 card->cid.hwrev         = UNSTUFF_BITS(resp, 44, 4);
83                 card->cid.fwrev         = UNSTUFF_BITS(resp, 40, 4);
84                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 24);
85                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
86                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
87                 break;
88
89         case 2: /* MMC v2.0 - v2.2 */
90         case 3: /* MMC v3.1 - v3.3 */
91         case 4: /* MMC v4 */
92                 card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
93                 card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
94                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
95                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
96                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
97                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
98                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
99                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
100                 card->cid.prv           = UNSTUFF_BITS(resp, 48, 8);
101                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 32);
102                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
103                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
104                 break;
105
106         default:
107                 pr_err("%s: card has unknown MMCA version %d\n",
108                         mmc_hostname(card->host), card->csd.mmca_vsn);
109                 return -EINVAL;
110         }
111
112         return 0;
113 }
114
115 static void mmc_set_erase_size(struct mmc_card *card)
116 {
117         if (card->ext_csd.erase_group_def & 1)
118                 card->erase_size = card->ext_csd.hc_erase_size;
119         else
120                 card->erase_size = card->csd.erase_size;
121
122         mmc_init_erase(card);
123 }
124
125 /*
126  * Given a 128-bit response, decode to our card CSD structure.
127  */
128 static int mmc_decode_csd(struct mmc_card *card)
129 {
130         struct mmc_csd *csd = &card->csd;
131         unsigned int e, m, a, b;
132         u32 *resp = card->raw_csd;
133
134         /*
135          * We only understand CSD structure v1.1 and v1.2.
136          * v1.2 has extra information in bits 15, 11 and 10.
137          * We also support eMMC v4.4 & v4.41.
138          */
139         csd->structure = UNSTUFF_BITS(resp, 126, 2);
140         if (csd->structure == 0) {
141                 pr_err("%s: unrecognised CSD structure version %d\n",
142                         mmc_hostname(card->host), csd->structure);
143                 return -EINVAL;
144         }
145
146         csd->mmca_vsn    = UNSTUFF_BITS(resp, 122, 4);
147         m = UNSTUFF_BITS(resp, 115, 4);
148         e = UNSTUFF_BITS(resp, 112, 3);
149         csd->tacc_ns     = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
150         csd->tacc_clks   = UNSTUFF_BITS(resp, 104, 8) * 100;
151
152         m = UNSTUFF_BITS(resp, 99, 4);
153         e = UNSTUFF_BITS(resp, 96, 3);
154         csd->max_dtr      = tran_exp[e] * tran_mant[m];
155         csd->cmdclass     = UNSTUFF_BITS(resp, 84, 12);
156
157         e = UNSTUFF_BITS(resp, 47, 3);
158         m = UNSTUFF_BITS(resp, 62, 12);
159         csd->capacity     = (1 + m) << (e + 2);
160
161         csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
162         csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
163         csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
164         csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
165         csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
166         csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
167         csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
168         csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
169
170         if (csd->write_blkbits >= 9) {
171                 a = UNSTUFF_BITS(resp, 42, 5);
172                 b = UNSTUFF_BITS(resp, 37, 5);
173                 csd->erase_size = (a + 1) * (b + 1);
174                 csd->erase_size <<= csd->write_blkbits - 9;
175         }
176
177         return 0;
178 }
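/*
 * Note: cards larger than 2GB are expected to report a fixed C_SIZE and
 * C_SIZE_MULT here; with m = 4095 and e = 7 the formula above yields
 * (1 + 4095) << (7 + 2) = 2097152 = 4096 * 512, the "magic" capacity that
 * mmc_read_ext_csd() checks for below when EXT_CSD cannot be read.
 */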
179
180 static void mmc_select_card_type(struct mmc_card *card)
181 {
182         struct mmc_host *host = card->host;
183         u8 card_type = card->ext_csd.raw_card_type;
184         u32 caps = host->caps, caps2 = host->caps2;
185         unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
186         unsigned int avail_type = 0;
187
188         if (caps & MMC_CAP_MMC_HIGHSPEED &&
189             card_type & EXT_CSD_CARD_TYPE_HS_26) {
190                 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
191                 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
192         }
193
194         if (caps & MMC_CAP_MMC_HIGHSPEED &&
195             card_type & EXT_CSD_CARD_TYPE_HS_52) {
196                 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
197                 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
198         }
199
200         if (caps & MMC_CAP_1_8V_DDR &&
201             card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
202                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
203                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
204         }
205
206         if (caps & MMC_CAP_1_2V_DDR &&
207             card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
208                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
209                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
210         }
211
212         if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
213             card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
214                 hs200_max_dtr = MMC_HS200_MAX_DTR;
215                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
216         }
217
218         if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
219             card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
220                 hs200_max_dtr = MMC_HS200_MAX_DTR;
221                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
222         }
223
224         if (caps2 & MMC_CAP2_HS400_1_8V &&
225             card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
226                 hs200_max_dtr = MMC_HS200_MAX_DTR;
227                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
228         }
229
230         if (caps2 & MMC_CAP2_HS400_1_2V &&
231             card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
232                 hs200_max_dtr = MMC_HS200_MAX_DTR;
233                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
234         }
235
236         card->ext_csd.hs_max_dtr = hs_max_dtr;
237         card->ext_csd.hs200_max_dtr = hs200_max_dtr;
238         card->mmc_avail_type = avail_type;
239 }
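/*
 * Note that the HS400 card types above also fill in hs200_max_dtr: HS400 is
 * entered via HS200 and runs at the same maximum clock, so
 * mmc_set_bus_speed() reuses hs200_max_dtr for either mode.
 */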
240
241 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
242 {
243         u8 hc_erase_grp_sz, hc_wp_grp_sz;
244
245         /*
246          * Disable these attributes by default
247          */
248         card->ext_csd.enhanced_area_offset = -EINVAL;
249         card->ext_csd.enhanced_area_size = -EINVAL;
250
251         /*
252          * Enhanced area feature support -- check whether the eMMC
253          * card has the Enhanced area enabled.  If so, export enhanced
254          * area offset and size to user by adding sysfs interface.
255          */
256         if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
257             (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
258                 if (card->ext_csd.partition_setting_completed) {
259                         hc_erase_grp_sz =
260                                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
261                         hc_wp_grp_sz =
262                                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
263
264                         /*
265                          * calculate the enhanced data area offset, in bytes
266                          */
267                         card->ext_csd.enhanced_area_offset =
268                                 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
269                                 (ext_csd[137] << 8) + ext_csd[136];
270                         if (mmc_card_blockaddr(card))
271                                 card->ext_csd.enhanced_area_offset <<= 9;
272                         /*
273                          * calculate the enhanced data area size, in kilobytes
274                          */
275                         card->ext_csd.enhanced_area_size =
276                                 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
277                                 ext_csd[140];
278                         card->ext_csd.enhanced_area_size *=
279                                 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
280                         card->ext_csd.enhanced_area_size <<= 9;
281                 } else {
282                         pr_warn("%s: defines enhanced area without partition setting complete\n",
283                                 mmc_hostname(card->host));
284                 }
285         }
286 }
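/*
 * Illustrative numbers: with ENH_SIZE_MULT = 2, HC_ERASE_GRP_SIZE = 4 and
 * HC_WP_GRP_SIZE = 16, the computation above gives 2 * (4 * 16) = 128,
 * shifted left by 9 to 65536 KiB, i.e. a 64 MiB enhanced area
 * (ENH_SIZE_MULT counts write-protect groups).
 */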
287
288 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
289 {
290         int idx;
291         u8 hc_erase_grp_sz, hc_wp_grp_sz;
292         unsigned int part_size;
293
294         /*
295          * General purpose partition feature support --
296          * If ext_csd has the size of general purpose partitions,
297          * set size, part_cfg, partition name in mmc_part.
298          */
299         if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
300             EXT_CSD_PART_SUPPORT_PART_EN) {
301                 hc_erase_grp_sz =
302                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
303                 hc_wp_grp_sz =
304                         ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
305
306                 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
307                         if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
308                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
309                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
310                                 continue;
311                         if (card->ext_csd.partition_setting_completed == 0) {
312                                 pr_warn("%s: has partition size defined without partition complete\n",
313                                         mmc_hostname(card->host));
314                                 break;
315                         }
316                         part_size =
317                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
318                                 << 16) +
319                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
320                                 << 8) +
321                                 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
322                         part_size *= (size_t)(hc_erase_grp_sz *
323                                 hc_wp_grp_sz);
324                         mmc_part_add(card, part_size << 19,
325                                 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
326                                 "gp%d", idx, false,
327                                 MMC_BLK_DATA_AREA_GP);
328                 }
329         }
330 }
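/*
 * The general purpose partition size follows the same pattern: the 3-byte
 * GP_SIZE_MULT is multiplied by HC_ERASE_GRP_SIZE * HC_WP_GRP_SIZE and then
 * shifted left by 19 (i.e. scaled by 512 KiB) to get the size in bytes
 * passed to mmc_part_add().
 */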
331
332 /*
333  * Decode extended CSD.
334  */
335 static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
336 {
337         int err = 0, idx;
338         unsigned int part_size;
339
340         /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
341         card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
342         if (card->csd.structure == 3) {
343                 if (card->ext_csd.raw_ext_csd_structure > 2) {
344                         pr_err("%s: unrecognised EXT_CSD structure version %d\n",
345                                 mmc_hostname(card->host),
346                                 card->ext_csd.raw_ext_csd_structure);
347                         err = -EINVAL;
348                         goto out;
349                 }
350         }
351
352         /*
353          * The EXT_CSD format is meant to be forward compatible. As long
354          * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
355          * are authorized, see JEDEC JESD84-B50 section B.8.
356          */
357         card->ext_csd.rev = ext_csd[EXT_CSD_REV];
358
359         card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
360         card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
361         card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
362         card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
363         if (card->ext_csd.rev >= 2) {
364                 card->ext_csd.sectors =
365                         ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
366                         ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
367                         ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
368                         ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
369
370                 /* Cards with density > 2GiB are sector addressed */
371                 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
372                         mmc_card_set_blockaddr(card);
373         }
374
375         card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
376         mmc_select_card_type(card);
377
378         card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
379         card->ext_csd.raw_erase_timeout_mult =
380                 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
381         card->ext_csd.raw_hc_erase_grp_size =
382                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
383         if (card->ext_csd.rev >= 3) {
384                 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
385                 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
386
387                 /* EXT_CSD value is in units of 10ms, but we store in ms */
388                 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
389
390                 /* Sleep / awake timeout in 100ns units */
391                 if (sa_shift > 0 && sa_shift <= 0x17)
392                         card->ext_csd.sa_timeout =
393                                         1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
394                 card->ext_csd.erase_group_def =
395                         ext_csd[EXT_CSD_ERASE_GROUP_DEF];
396                 card->ext_csd.hc_erase_timeout = 300 *
397                         ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
398                 card->ext_csd.hc_erase_size =
399                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
400
401                 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
402
403                 /*
404                  * There are two boot regions of equal size, defined in
405                  * multiples of 128K.
406                  */
407                 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
408                         for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
409                                 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
410                                 mmc_part_add(card, part_size,
411                                         EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
412                                         "boot%d", idx, true,
413                                         MMC_BLK_DATA_AREA_BOOT);
414                         }
415                 }
416         }
417
418         card->ext_csd.raw_hc_erase_gap_size =
419                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
420         card->ext_csd.raw_sec_trim_mult =
421                 ext_csd[EXT_CSD_SEC_TRIM_MULT];
422         card->ext_csd.raw_sec_erase_mult =
423                 ext_csd[EXT_CSD_SEC_ERASE_MULT];
424         card->ext_csd.raw_sec_feature_support =
425                 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
426         card->ext_csd.raw_trim_mult =
427                 ext_csd[EXT_CSD_TRIM_MULT];
428         card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
429         if (card->ext_csd.rev >= 4) {
430                 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
431                     EXT_CSD_PART_SETTING_COMPLETED)
432                         card->ext_csd.partition_setting_completed = 1;
433                 else
434                         card->ext_csd.partition_setting_completed = 0;
435
436                 mmc_manage_enhanced_area(card, ext_csd);
437
438                 mmc_manage_gp_partitions(card, ext_csd);
439
440                 card->ext_csd.sec_trim_mult =
441                         ext_csd[EXT_CSD_SEC_TRIM_MULT];
442                 card->ext_csd.sec_erase_mult =
443                         ext_csd[EXT_CSD_SEC_ERASE_MULT];
444                 card->ext_csd.sec_feature_support =
445                         ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
446                 card->ext_csd.trim_timeout = 300 *
447                         ext_csd[EXT_CSD_TRIM_MULT];
448
449                 /*
450                  * Note that the call to mmc_part_add above defaults to read
451                  * only. If this default assumption is changed, the call must
452                  * take into account the value of boot_locked below.
453                  */
454                 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
455                 card->ext_csd.boot_ro_lockable = true;
456
457                 /* Save power class values */
458                 card->ext_csd.raw_pwr_cl_52_195 =
459                         ext_csd[EXT_CSD_PWR_CL_52_195];
460                 card->ext_csd.raw_pwr_cl_26_195 =
461                         ext_csd[EXT_CSD_PWR_CL_26_195];
462                 card->ext_csd.raw_pwr_cl_52_360 =
463                         ext_csd[EXT_CSD_PWR_CL_52_360];
464                 card->ext_csd.raw_pwr_cl_26_360 =
465                         ext_csd[EXT_CSD_PWR_CL_26_360];
466                 card->ext_csd.raw_pwr_cl_200_195 =
467                         ext_csd[EXT_CSD_PWR_CL_200_195];
468                 card->ext_csd.raw_pwr_cl_200_360 =
469                         ext_csd[EXT_CSD_PWR_CL_200_360];
470                 card->ext_csd.raw_pwr_cl_ddr_52_195 =
471                         ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
472                 card->ext_csd.raw_pwr_cl_ddr_52_360 =
473                         ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
474                 card->ext_csd.raw_pwr_cl_ddr_200_360 =
475                         ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
476         }
477
478         if (card->ext_csd.rev >= 5) {
479                 /* Adjust production date as per JEDEC JESD84-B451 */
480                 if (card->cid.year < 2010)
481                         card->cid.year += 16;
482
483                 /* check whether the eMMC card supports BKOPS */
484                 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
485                         card->ext_csd.bkops = 1;
486                         card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
487                         card->ext_csd.raw_bkops_status =
488                                 ext_csd[EXT_CSD_BKOPS_STATUS];
489                         if (!card->ext_csd.bkops_en)
490                                 pr_info("%s: BKOPS_EN bit is not set\n",
491                                         mmc_hostname(card->host));
492                 }
493
494                 /* check whether the eMMC card supports HPI */
495                 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
496                         card->ext_csd.hpi = 1;
497                         if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
498                                 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
499                         else
500                                 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
501                         /*
502                          * Indicate the maximum timeout to close
503                          * a command interrupted by HPI
504                          */
505                         card->ext_csd.out_of_int_time =
506                                 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
507                 }
508
509                 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
510                 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
511
512                 /*
513                  * RPMB regions are defined in multiples of 128K.
514                  */
515                 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
516                 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
517                         mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
518                                 EXT_CSD_PART_CONFIG_ACC_RPMB,
519                                 "rpmb", 0, false,
520                                 MMC_BLK_DATA_AREA_RPMB);
521                 }
522         }
523
524         card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
525         if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
526                 card->erased_byte = 0xFF;
527         else
528                 card->erased_byte = 0x0;
529
530         /* eMMC v4.5 or later */
531         if (card->ext_csd.rev >= 6) {
532                 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
533
534                 card->ext_csd.generic_cmd6_time = 10 *
535                         ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
536                 card->ext_csd.power_off_longtime = 10 *
537                         ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
538
539                 card->ext_csd.cache_size =
540                         ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
541                         ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
542                         ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
543                         ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
544
545                 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
546                         card->ext_csd.data_sector_size = 4096;
547                 else
548                         card->ext_csd.data_sector_size = 512;
549
550                 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
551                     (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
552                         card->ext_csd.data_tag_unit_size =
553                         ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
554                         (card->ext_csd.data_sector_size);
555                 } else {
556                         card->ext_csd.data_tag_unit_size = 0;
557                 }
558
559                 card->ext_csd.max_packed_writes =
560                         ext_csd[EXT_CSD_MAX_PACKED_WRITES];
561                 card->ext_csd.max_packed_reads =
562                         ext_csd[EXT_CSD_MAX_PACKED_READS];
563         } else {
564                 card->ext_csd.data_sector_size = 512;
565         }
566
567         /* eMMC v5 or later */
568         if (card->ext_csd.rev >= 7) {
569                 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
570                        MMC_FIRMWARE_LEN);
571                 card->ext_csd.ffu_capable =
572                         (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
573                         !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
574         }
575 out:
576         return err;
577 }
578
579 static int mmc_read_ext_csd(struct mmc_card *card)
580 {
581         u8 *ext_csd;
582         int err;
583
584         if (!mmc_can_ext_csd(card))
585                 return 0;
586
587         err = mmc_get_ext_csd(card, &ext_csd);
588         if (err) {
589                 /* If the host or the card can't do the switch,
590                  * fail more gracefully. */
591                 if ((err != -EINVAL)
592                  && (err != -ENOSYS)
593                  && (err != -EFAULT))
594                         return err;
595
596                 /*
597                  * High capacity cards should have this "magic" size
598                  * stored in their CSD.
599                  */
600                 if (card->csd.capacity == (4096 * 512)) {
601                         pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
602                                 mmc_hostname(card->host));
603                 } else {
604                         pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
605                                 mmc_hostname(card->host));
606                         err = 0;
607                 }
608
609                 return err;
610         }
611
612         err = mmc_decode_ext_csd(card, ext_csd);
613         kfree(ext_csd);
614         return err;
615 }
616
617 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
618 {
619         u8 *bw_ext_csd;
620         int err;
621
622         if (bus_width == MMC_BUS_WIDTH_1)
623                 return 0;
624
625         err = mmc_get_ext_csd(card, &bw_ext_csd);
626         if (err)
627                 return err;
628
629         /* only compare read only fields */
630         err = !((card->ext_csd.raw_partition_support ==
631                         bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
632                 (card->ext_csd.raw_erased_mem_count ==
633                         bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
634                 (card->ext_csd.rev ==
635                         bw_ext_csd[EXT_CSD_REV]) &&
636                 (card->ext_csd.raw_ext_csd_structure ==
637                         bw_ext_csd[EXT_CSD_STRUCTURE]) &&
638                 (card->ext_csd.raw_card_type ==
639                         bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
640                 (card->ext_csd.raw_s_a_timeout ==
641                         bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
642                 (card->ext_csd.raw_hc_erase_gap_size ==
643                         bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
644                 (card->ext_csd.raw_erase_timeout_mult ==
645                         bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
646                 (card->ext_csd.raw_hc_erase_grp_size ==
647                         bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
648                 (card->ext_csd.raw_sec_trim_mult ==
649                         bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
650                 (card->ext_csd.raw_sec_erase_mult ==
651                         bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
652                 (card->ext_csd.raw_sec_feature_support ==
653                         bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
654                 (card->ext_csd.raw_trim_mult ==
655                         bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
656                 (card->ext_csd.raw_sectors[0] ==
657                         bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
658                 (card->ext_csd.raw_sectors[1] ==
659                         bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
660                 (card->ext_csd.raw_sectors[2] ==
661                         bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
662                 (card->ext_csd.raw_sectors[3] ==
663                         bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
664                 (card->ext_csd.raw_pwr_cl_52_195 ==
665                         bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
666                 (card->ext_csd.raw_pwr_cl_26_195 ==
667                         bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
668                 (card->ext_csd.raw_pwr_cl_52_360 ==
669                         bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
670                 (card->ext_csd.raw_pwr_cl_26_360 ==
671                         bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
672                 (card->ext_csd.raw_pwr_cl_200_195 ==
673                         bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
674                 (card->ext_csd.raw_pwr_cl_200_360 ==
675                         bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
676                 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
677                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
678                 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
679                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
680                 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
681                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
682
683         if (err)
684                 err = -EINVAL;
685
686         kfree(bw_ext_csd);
687         return err;
688 }
689
690 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
691         card->raw_cid[2], card->raw_cid[3]);
692 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
693         card->raw_csd[2], card->raw_csd[3]);
694 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
695 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
696 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
697 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
698 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
699 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
700 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
701 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
702 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
703 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
704 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
705                 card->ext_csd.enhanced_area_offset);
706 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
707 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
708 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
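/*
 * These attributes are exported under the card's sysfs device directory
 * (typically something like /sys/bus/mmc/devices/mmc0:0001/, though the exact
 * path depends on the host and RCA); e.g. reading "cid" prints the raw
 * 128-bit CID as four 32-bit words.
 */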
709
710 static ssize_t mmc_fwrev_show(struct device *dev,
711                               struct device_attribute *attr,
712                               char *buf)
713 {
714         struct mmc_card *card = mmc_dev_to_card(dev);
715
716         if (card->ext_csd.rev < 7) {
717                 return sprintf(buf, "0x%x\n", card->cid.fwrev);
718         } else {
719                 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
720                                card->ext_csd.fwrev);
721         }
722 }
723
724 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
725
726 static struct attribute *mmc_std_attrs[] = {
727         &dev_attr_cid.attr,
728         &dev_attr_csd.attr,
729         &dev_attr_date.attr,
730         &dev_attr_erase_size.attr,
731         &dev_attr_preferred_erase_size.attr,
732         &dev_attr_fwrev.attr,
733         &dev_attr_ffu_capable.attr,
734         &dev_attr_hwrev.attr,
735         &dev_attr_manfid.attr,
736         &dev_attr_name.attr,
737         &dev_attr_oemid.attr,
738         &dev_attr_prv.attr,
739         &dev_attr_serial.attr,
740         &dev_attr_enhanced_area_offset.attr,
741         &dev_attr_enhanced_area_size.attr,
742         &dev_attr_raw_rpmb_size_mult.attr,
743         &dev_attr_rel_sectors.attr,
744         NULL,
745 };
746 ATTRIBUTE_GROUPS(mmc_std);
747
748 static struct device_type mmc_type = {
749         .groups = mmc_std_groups,
750 };
751
752 /*
753  * Select the PowerClass for the current bus width
754  * If power class is defined for 4/8 bit bus in the
755  * extended CSD register, select it by executing the
756  * mmc_switch command.
757  */
758 static int __mmc_select_powerclass(struct mmc_card *card,
759                                    unsigned int bus_width)
760 {
761         struct mmc_host *host = card->host;
762         struct mmc_ext_csd *ext_csd = &card->ext_csd;
763         unsigned int pwrclass_val = 0;
764         int err = 0;
765
766         switch (1 << host->ios.vdd) {
767         case MMC_VDD_165_195:
768                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
769                         pwrclass_val = ext_csd->raw_pwr_cl_26_195;
770                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
771                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
772                                 ext_csd->raw_pwr_cl_52_195 :
773                                 ext_csd->raw_pwr_cl_ddr_52_195;
774                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
775                         pwrclass_val = ext_csd->raw_pwr_cl_200_195;
776                 break;
777         case MMC_VDD_27_28:
778         case MMC_VDD_28_29:
779         case MMC_VDD_29_30:
780         case MMC_VDD_30_31:
781         case MMC_VDD_31_32:
782         case MMC_VDD_32_33:
783         case MMC_VDD_33_34:
784         case MMC_VDD_34_35:
785         case MMC_VDD_35_36:
786                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
787                         pwrclass_val = ext_csd->raw_pwr_cl_26_360;
788                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
789                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
790                                 ext_csd->raw_pwr_cl_52_360 :
791                                 ext_csd->raw_pwr_cl_ddr_52_360;
792                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
793                         pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
794                                 ext_csd->raw_pwr_cl_ddr_200_360 :
795                                 ext_csd->raw_pwr_cl_200_360;
796                 break;
797         default:
798                 pr_warn("%s: Voltage range not supported for power class\n",
799                         mmc_hostname(host));
800                 return -EINVAL;
801         }
802
803         if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
804                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
805                                 EXT_CSD_PWR_CL_8BIT_SHIFT;
806         else
807                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
808                                 EXT_CSD_PWR_CL_4BIT_SHIFT;
809
810         /* If the power class is different from the default value */
811         if (pwrclass_val > 0) {
812                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
813                                  EXT_CSD_POWER_CLASS,
814                                  pwrclass_val,
815                                  card->ext_csd.generic_cmd6_time);
816         }
817
818         return err;
819 }
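/*
 * The PWR_CL_* bytes hold two 4-bit power classes: the high nibble applies to
 * the 8-bit bus and the low nibble to the 4-bit bus, which is what the
 * mask/shift pairs above extract.  For instance, a raw value of 0x34 would
 * select class 3 on an 8-bit bus and class 4 on a 4-bit bus.
 */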
820
821 static int mmc_select_powerclass(struct mmc_card *card)
822 {
823         struct mmc_host *host = card->host;
824         u32 bus_width, ext_csd_bits;
825         int err, ddr;
826
827         /* Power class selection is supported for versions >= 4.0 */
828         if (!mmc_can_ext_csd(card))
829                 return 0;
830
831         bus_width = host->ios.bus_width;
832         /* Power class values are defined only for 4/8 bit bus */
833         if (bus_width == MMC_BUS_WIDTH_1)
834                 return 0;
835
836         ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
837         if (ddr)
838                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
839                         EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
840         else
841                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
842                         EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
843
844         err = __mmc_select_powerclass(card, ext_csd_bits);
845         if (err)
846                 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
847                         mmc_hostname(host), 1 << bus_width, ddr);
848
849         return err;
850 }
851
852 /*
853  * Set the bus speed for the selected speed mode.
854  */
855 static void mmc_set_bus_speed(struct mmc_card *card)
856 {
857         unsigned int max_dtr = (unsigned int)-1;
858
859         if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
860              max_dtr > card->ext_csd.hs200_max_dtr)
861                 max_dtr = card->ext_csd.hs200_max_dtr;
862         else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
863                 max_dtr = card->ext_csd.hs_max_dtr;
864         else if (max_dtr > card->csd.max_dtr)
865                 max_dtr = card->csd.max_dtr;
866
867         mmc_set_clock(card->host, max_dtr);
868 }
869
870 /*
871  * Select the bus width among 4-bit and 8-bit (SDR).
872  * If the bus width is changed successfully, return the selected width value.
873  * Zero, rather than an error, is returned if wide bus widths are not supported.
874  */
875 static int mmc_select_bus_width(struct mmc_card *card)
876 {
877         static unsigned ext_csd_bits[] = {
878                 EXT_CSD_BUS_WIDTH_8,
879                 EXT_CSD_BUS_WIDTH_4,
880         };
881         static unsigned bus_widths[] = {
882                 MMC_BUS_WIDTH_8,
883                 MMC_BUS_WIDTH_4,
884         };
885         struct mmc_host *host = card->host;
886         unsigned idx, bus_width = 0;
887         int err = 0;
888
889         if (!mmc_can_ext_csd(card) &&
890             !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
891                 return 0;
892
893         idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
894
895         /*
896          * Unlike SD, MMC cards don't have a configuration register that
897          * reports the supported bus widths.  So either run the bus test
898          * command to identify the supported bus width, or compare the EXT_CSD
899          * values at the current bus width with those read earlier in 1-bit mode.
900          */
901         for (; idx < ARRAY_SIZE(bus_widths); idx++) {
902                 /*
903                  * If the host is capable of 8-bit transfers, first try to
904                  * switch the device into 8-bit transfer mode.  If the
905                  * mmc_switch command returns an error, fall back to 4-bit
906                  * transfer mode.  On success, set the corresponding bus
907                  * width on the host.
908                  */
909                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
910                                  EXT_CSD_BUS_WIDTH,
911                                  ext_csd_bits[idx],
912                                  card->ext_csd.generic_cmd6_time);
913                 if (err)
914                         continue;
915
916                 bus_width = bus_widths[idx];
917                 mmc_set_bus_width(host, bus_width);
918
919                 /*
920                  * If the controller can't handle the bus width test,
921                  * compare the ext_csd previously read in 1-bit mode
922                  * against the ext_csd read at the new bus width
923                  */
924                 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
925                         err = mmc_compare_ext_csds(card, bus_width);
926                 else
927                         err = mmc_bus_test(card, bus_width);
928
929                 if (!err) {
930                         err = bus_width;
931                         break;
932                 } else {
933                         pr_warn("%s: switch to bus width %d failed\n",
934                                 mmc_hostname(host), ext_csd_bits[idx]);
935                 }
936         }
937
938         return err;
939 }
940
941 /*
942  * Switch to the high-speed mode
943  */
944 static int mmc_select_hs(struct mmc_card *card)
945 {
946         int err;
947
948         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
949                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
950                            card->ext_csd.generic_cmd6_time,
951                            true, true, true);
952         if (!err)
953                 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
954
955         return err;
956 }
957
958 /*
959  * Activate wide bus and DDR if supported.
960  */
961 static int mmc_select_hs_ddr(struct mmc_card *card)
962 {
963         struct mmc_host *host = card->host;
964         u32 bus_width, ext_csd_bits;
965         int err = 0;
966
967         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
968                 return 0;
969
970         bus_width = host->ios.bus_width;
971         if (bus_width == MMC_BUS_WIDTH_1)
972                 return 0;
973
974         ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
975                 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
976
977         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
978                         EXT_CSD_BUS_WIDTH,
979                         ext_csd_bits,
980                         card->ext_csd.generic_cmd6_time);
981         if (err) {
982                 pr_err("%s: switch to bus width %d ddr failed\n",
983                         mmc_hostname(host), 1 << bus_width);
984                 return err;
985         }
986
987         /*
988          * eMMC cards can support 3.3V to 1.2V i/o (vccq)
989          * signaling.
990          *
991          * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
992          *
993          * 1.8V vccq at 3.3V core voltage (vcc) is not required
994          * in the JEDEC spec for DDR.
995          *
996          * Even if an (e)MMC card can support vccq from 3.3V down to 1.2V,
997          * not every host controller can, e.g. some SDHCI controllers that
998          * are connected to an eMMC device.  Some of these host controllers
999          * still need to use a 1.8V vccq to be able to support
1000          * DDR mode.
1001          *
1002          * So the sequence will be:
1003          * if (host and device can both support 1.2v IO)
1004          *      use 1.2v IO;
1005          * else if (host and device can both support 1.8v IO)
1006          *      use 1.8v IO;
1007          * so if host and device can only support 3.3v IO, this is the
1008          * last choice.
1009          *
1010          * WARNING: eMMC rules are NOT the same as SD DDR
1011          */
1012         err = -EINVAL;
1013         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
1014                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1015
1016         if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1017                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1018
1019         /* if both voltage switches failed, make sure vccq is back at 3.3V */
1020         if (err)
1021                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1022
1023         if (!err)
1024                 mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1025
1026         return err;
1027 }
1028
1029 static int mmc_select_hs400(struct mmc_card *card)
1030 {
1031         struct mmc_host *host = card->host;
1032         int err = 0;
1033
1034         /*
1035          * HS400 mode requires 8-bit bus width
1036          */
1037         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1038               host->ios.bus_width == MMC_BUS_WIDTH_8))
1039                 return 0;
1040
1041         /*
1042          * Before switching to dual data rate operation for HS400,
1043          * it is required to convert from HS200 mode to HS mode.
1044          */
1045         mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1046         mmc_set_bus_speed(card);
1047
1048         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1049                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1050                            card->ext_csd.generic_cmd6_time,
1051                            true, true, true);
1052         if (err) {
1053                 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1054                         mmc_hostname(host), err);
1055                 return err;
1056         }
1057
1058         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1059                          EXT_CSD_BUS_WIDTH,
1060                          EXT_CSD_DDR_BUS_WIDTH_8,
1061                          card->ext_csd.generic_cmd6_time);
1062         if (err) {
1063                 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1064                         mmc_hostname(host), err);
1065                 return err;
1066         }
1067
1068         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1069                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
1070                            card->ext_csd.generic_cmd6_time,
1071                            true, true, true);
1072         if (err) {
1073                 pr_err("%s: switch to hs400 failed, err:%d\n",
1074                          mmc_hostname(host), err);
1075                 return err;
1076         }
1077
1078         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1079         mmc_set_bus_speed(card);
1080
1081         return 0;
1082 }
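/*
 * To summarise the sequence above: the card is first taken from HS200 back to
 * plain high-speed timing, the bus is then switched to 8-bit DDR, and only
 * after that is the HS400 timing selected; mmc_set_bus_speed() updates the
 * clock after the drop to HS and again once HS400 is enabled.
 */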
1083
1084 /*
1085  * For devices supporting HS200 mode, the following sequence
1086  * should be performed before executing the tuning process:
1087  * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
1088  * 2. switch to HS200 mode
1089  * 3. set the clock to > 52 MHz and <= 200 MHz
1090  */
1091 static int mmc_select_hs200(struct mmc_card *card)
1092 {
1093         struct mmc_host *host = card->host;
1094         int err = -EINVAL;
1095
1096         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1097                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1098
1099         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1100                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1101
1102         /* If this fails, try again during the next card power cycle */
1103         if (err)
1104                 goto err;
1105
1106         /*
1107          * Set the bus width (4 or 8) according to the host's capabilities,
1108          * and switch to HS200 mode if the bus width was set successfully.
1109          */
1110         err = mmc_select_bus_width(card);
1111         if (!IS_ERR_VALUE(err)) {
1112                 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1113                                    EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
1114                                    card->ext_csd.generic_cmd6_time,
1115                                    true, true, true);
1116                 if (!err)
1117                         mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1118         }
1119 err:
1120         return err;
1121 }
1122
1123 /*
1124  * Activate High Speed or HS200 mode if supported.
1125  */
1126 static int mmc_select_timing(struct mmc_card *card)
1127 {
1128         int err = 0;
1129
1130         if (!mmc_can_ext_csd(card))
1131                 goto bus_speed;
1132
1133         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1134                 err = mmc_select_hs200(card);
1135         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1136                 err = mmc_select_hs(card);
1137
1138         if (err && err != -EBADMSG)
1139                 return err;
1140
1141         if (err) {
1142                 pr_warn("%s: switch to %s failed\n",
1143                         mmc_hostname(card->host),
1144                         mmc_card_hs(card) ? "high-speed" :
1145                         (mmc_card_hs200(card) ? "hs200" : ""));
1146                 err = 0;
1147         }
1148
1149 bus_speed:
1150         /*
1151          * Set the bus speed to the selected bus timing.
1152          * If no timing was selected, the backwards-compatible (legacy) speed is the default.
1153          */
1154         mmc_set_bus_speed(card);
1155         return err;
1156 }
1157
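/*
 * The two blocks below are the fixed tuning patterns the device sends back in
 * response to CMD21 (MMC_SEND_TUNING_BLOCK_HS200); host drivers compare the
 * received data against them while searching for a good sample point.
 */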
1158 const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
1159         0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
1160         0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
1161         0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
1162         0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
1163         0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
1164         0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
1165         0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
1166         0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
1167 };
1168 EXPORT_SYMBOL(tuning_blk_pattern_4bit);
1169
1170 const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
1171         0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
1172         0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
1173         0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
1174         0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
1175         0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
1176         0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
1177         0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
1178         0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
1179         0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
1180         0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
1181         0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
1182         0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
1183         0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
1184         0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
1185         0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
1186         0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
1187 };
1188 EXPORT_SYMBOL(tuning_blk_pattern_8bit);
1189
1190 /*
1191  * Execute the tuning sequence to find the proper bus operating
1192  * conditions for HS200 and HS400; this sends CMD21 to the device.
1193  */
1194 static int mmc_hs200_tuning(struct mmc_card *card)
1195 {
1196         struct mmc_host *host = card->host;
1197         int err = 0;
1198
1199         /*
1200          * Timing should be adjusted to the HS400 target
1201          * operating frequency for the tuning process
1202          */
1203         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1204             host->ios.bus_width == MMC_BUS_WIDTH_8)
1205                 if (host->ops->prepare_hs400_tuning)
1206                         host->ops->prepare_hs400_tuning(host, &host->ios);
1207
1208         if (host->ops->execute_tuning) {
1209                 mmc_host_clk_hold(host);
1210                 err = host->ops->execute_tuning(host,
1211                                 MMC_SEND_TUNING_BLOCK_HS200);
1212                 mmc_host_clk_release(host);
1213
1214                 if (err)
1215                         pr_err("%s: tuning execution failed\n",
1216                                 mmc_hostname(host));
1217         }
1218
1219         return err;
1220 }
1221
1222 /*
1223  * Handle the detection and initialisation of a card.
1224  *
1225  * In the case of a resume, "oldcard" will contain the card
1226  * we're trying to reinitialise.
1227  */
1228 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1229         struct mmc_card *oldcard)
1230 {
1231         struct mmc_card *card;
1232         int err;
1233         u32 cid[4];
1234         u32 rocr;
1235
1236         BUG_ON(!host);
1237         WARN_ON(!host->claimed);
1238
1239         /* Set correct bus mode for MMC before attempting init */
1240         if (!mmc_host_is_spi(host))
1241                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1242
1243         /*
1244          * Since we're changing the OCR value, we seem to
1245          * need to tell some cards to go back to the idle
1246          * state.  We wait 1ms to give cards time to
1247          * respond.
1248          * mmc_go_idle() is also needed for eMMC devices that are asleep.
1249          */
1250         mmc_go_idle(host);
1251
1252         /* The extra bit indicates that we support high capacity */
1253         err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1254         if (err)
1255                 goto err;
1256
1257         /*
1258          * For SPI, enable CRC as appropriate.
1259          */
1260         if (mmc_host_is_spi(host)) {
1261                 err = mmc_spi_set_crc(host, use_spi_crc);
1262                 if (err)
1263                         goto err;
1264         }
1265
1266         /*
1267          * Fetch CID from card.
1268          */
1269         if (mmc_host_is_spi(host))
1270                 err = mmc_send_cid(host, cid);
1271         else
1272                 err = mmc_all_send_cid(host, cid);
1273         if (err)
1274                 goto err;
1275
1276         if (oldcard) {
1277                 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1278                         err = -ENOENT;
1279                         goto err;
1280                 }
1281
1282                 card = oldcard;
1283         } else {
1284                 /*
1285                  * Allocate card structure.
1286                  */
1287                 card = mmc_alloc_card(host, &mmc_type);
1288                 if (IS_ERR(card)) {
1289                         err = PTR_ERR(card);
1290                         goto err;
1291                 }
1292
1293                 card->ocr = ocr;
1294                 card->type = MMC_TYPE_MMC;
1295                 card->rca = 1;
1296                 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1297         }
1298
1299         /*
1300          * For native busses:  set card RCA and quit open drain mode.
1301          */
1302         if (!mmc_host_is_spi(host)) {
1303                 err = mmc_set_relative_addr(card);
1304                 if (err)
1305                         goto free_card;
1306
1307                 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1308         }
1309
1310         if (!oldcard) {
1311                 /*
1312                  * Fetch CSD from card.
1313                  */
1314                 err = mmc_send_csd(card, card->raw_csd);
1315                 if (err)
1316                         goto free_card;
1317
1318                 err = mmc_decode_csd(card);
1319                 if (err)
1320                         goto free_card;
1321                 err = mmc_decode_cid(card);
1322                 if (err)
1323                         goto free_card;
1324         }
1325
1326         /*
1327          * handling only for cards supporting DSR and hosts requesting
1328          * DSR configuration
1329          */
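        /*
         * The DSR (driver stage register) adjusts the card's output driver
         * strength and is programmed with SET_DSR (CMD4); dsr_req is
         * typically set when the platform supplies a DSR value (e.g. via
         * device tree).
         */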
1330         if (card->csd.dsr_imp && host->dsr_req)
1331                 mmc_set_dsr(host);
1332
1333         /*
1334          * Select card, as all following commands rely on that.
1335          */
1336         if (!mmc_host_is_spi(host)) {
1337                 err = mmc_select_card(card);
1338                 if (err)
1339                         goto free_card;
1340         }
1341
1342         if (!oldcard) {
1343                 /* Read extended CSD. */
1344                 err = mmc_read_ext_csd(card);
1345                 if (err)
1346                         goto free_card;
1347
1348                 /* If doing byte addressing, check if required to do sector
1349                  * addressing.  Handle the case of <2GB cards needing sector
1350                  * addressing.  See section 8.1 JEDEC Standard JESD84-A441;
1351                  * OCR register has bit 30 set for sector addressing.
1352                  */
1353                 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
1354                         mmc_card_set_blockaddr(card);
1355
1356                 /* Erase size depends on CSD and Extended CSD */
1357                 mmc_set_erase_size(card);
1358         }
1359
1360         /*
1361          * Enable ERASE_GRP_DEF when partitioning has completed or the host wants
1362          * high-capacity erase sizes; the bit is lost after every reset or power off.
1363          */
1364         if (card->ext_csd.partition_setting_completed ||
1365             (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
1366                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1367                                  EXT_CSD_ERASE_GROUP_DEF, 1,
1368                                  card->ext_csd.generic_cmd6_time);
1369
1370                 if (err && err != -EBADMSG)
1371                         goto free_card;
1372
1373                 if (err) {
1374                         err = 0;
1375                         /*
1376                          * Just invalidate the enhanced area offset & size;
1377                          * we will try to enable ERASE_GROUP_DEF again at
1378                          * the next reinitialisation.
1379                          */
1380                         card->ext_csd.enhanced_area_offset = -EINVAL;
1381                         card->ext_csd.enhanced_area_size = -EINVAL;
1382                 } else {
1383                         card->ext_csd.erase_group_def = 1;
1384                         /*
1385                          * ERASE_GRP_DEF was enabled successfully.
1386                          * This affects the erase size, so it has
1387                          * to be recomputed here.
1388                          */
1389                         mmc_set_erase_size(card);
1390                 }
1391         }
1392
1393         /*
1394          * Ensure eMMC user default partition is enabled
1395          */
1396         if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1397                 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1398                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1399                                  card->ext_csd.part_config,
1400                                  card->ext_csd.part_time);
1401                 if (err && err != -EBADMSG)
1402                         goto free_card;
1403         }
1404
1405         /*
1406          * Enable power_off_notification byte in the ext_csd register
1407          */
1408         if (card->ext_csd.rev >= 6) {
1409                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1410                                  EXT_CSD_POWER_OFF_NOTIFICATION,
1411                                  EXT_CSD_POWER_ON,
1412                                  card->ext_csd.generic_cmd6_time);
1413                 if (err && err != -EBADMSG)
1414                         goto free_card;
1415
1416                 /*
1417                  * At this point err is either -EBADMSG or 0; only record
1418                  * the notification state as enabled on success.
1419                  */
1420                 if (!err)
1421                         card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1422         }
1423
1424         /*
1425          * Select timing interface
1426          */
1427         err = mmc_select_timing(card);
1428         if (err)
1429                 goto free_card;
1430
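        /*
         * HS200 requires the tuning procedure to be run at the HS200
         * frequency before data transfers; only then can we try to step up
         * to HS400, which additionally needs an 8-bit DDR bus.
         */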
1431         if (mmc_card_hs200(card)) {
1432                 err = mmc_hs200_tuning(card);
1433                 if (err)
1434                         goto free_card;
1435
1436                 err = mmc_select_hs400(card);
1437                 if (err)
1438                         goto free_card;
1439         } else if (mmc_card_hs(card)) {
1440                 /* Try to select a wider bus width; failure here is not fatal */
1441                 err = mmc_select_bus_width(card);
1442                 if (!IS_ERR_VALUE(err)) {
1443                         err = mmc_select_hs_ddr(card);
1444                         if (err)
1445                                 goto free_card;
1446                 }
1447         }
1448
1449         /*
1450          * Choose the power class for the selected bus interface
1451          */
1452         mmc_select_powerclass(card);
1453
1454         /*
1455          * Enable HPI feature (if supported)
1456          */
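        /*
         * HPI (high priority interrupt) allows the host to interrupt an
         * ongoing write or background operation; hpi_en records that the
         * feature has been switched on in the card.
         */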
1457         if (card->ext_csd.hpi) {
1458                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1459                                 EXT_CSD_HPI_MGMT, 1,
1460                                 card->ext_csd.generic_cmd6_time);
1461                 if (err && err != -EBADMSG)
1462                         goto free_card;
1463                 if (err) {
1464                         pr_warn("%s: Enabling HPI failed\n",
1465                                 mmc_hostname(card->host));
1466                         err = 0;
1467                 } else
1468                         card->ext_csd.hpi_en = 1;
1469         }
1470
1471         /*
1472          * A non-zero cache size indicates that the card has a cache,
1473          * which can be turned on.
1474          */
1475         if (card->ext_csd.cache_size > 0) {
1476                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1477                                 EXT_CSD_CACHE_CTRL, 1,
1478                                 card->ext_csd.generic_cmd6_time);
1479                 if (err && err != -EBADMSG)
1480                         goto free_card;
1481
1482                 /*
1483                  * Only when there was no error has the cache been turned on.
1484                  */
1485                 if (err) {
1486                         pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1487                                 mmc_hostname(card->host), err);
1488                         card->ext_csd.cache_ctrl = 0;
1489                         err = 0;
1490                 } else {
1491                         card->ext_csd.cache_ctrl = 1;
1492                 }
1493         }
1494
1495         /*
1496          * The spec defines mandatory minimum values for packed commands:
1497          * reads: 5, writes: 3.
1498          */
1499         if (card->ext_csd.max_packed_writes >= 3 &&
1500             card->ext_csd.max_packed_reads >= 5 &&
1501             host->caps2 & MMC_CAP2_PACKED_CMD) {
1502                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1503                                 EXT_CSD_EXP_EVENTS_CTRL,
1504                                 EXT_CSD_PACKED_EVENT_EN,
1505                                 card->ext_csd.generic_cmd6_time);
1506                 if (err && err != -EBADMSG)
1507                         goto free_card;
1508                 if (err) {
1509                         pr_warn("%s: Enabling packed event failed\n",
1510                                 mmc_hostname(card->host));
1511                         card->ext_csd.packed_event_en = 0;
1512                         err = 0;
1513                 } else {
1514                         card->ext_csd.packed_event_en = 1;
1515                 }
1516         }
1517
1518         if (!oldcard)
1519                 host->card = card;
1520
1521         return 0;
1522
1523 free_card:
1524         if (!oldcard)
1525                 mmc_remove_card(card);
1526 err:
1527         return err;
1528 }
1529
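/*
 * The SLEEP/AWAKE command (CMD5) was introduced with eMMC v4.3, which
 * corresponds to EXT_CSD revision 3, so older cards cannot be put to sleep.
 */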
1530 static int mmc_can_sleep(struct mmc_card *card)
1531 {
1532         return (card && card->ext_csd.rev >= 3);
1533 }
1534
1535 static int mmc_sleep(struct mmc_host *host)
1536 {
1537         struct mmc_command cmd = {0};
1538         struct mmc_card *card = host->card;
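        /* sa_timeout is kept in 100ns units; round up to milliseconds. */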
1539         unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1540         int err;
1541
1542         err = mmc_deselect_cards(host);
1543         if (err)
1544                 return err;
1545
1546         cmd.opcode = MMC_SLEEP_AWAKE;
1547         cmd.arg = card->rca << 16;
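        /* Bit 15 of the CMD5 argument selects Sleep (1) rather than Awake (0). */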
1548         cmd.arg |= 1 << 15;
1549
1550         /*
1551          * If the max_busy_timeout of the host is specified, validate it against
1552          * the sleep cmd timeout. A failure means we need to prevent the host
1553          * from doing hw busy detection, which is done by converting to an R1
1554          * response instead of an R1B.
1555          */
1556         if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1557                 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1558         } else {
1559                 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1560                 cmd.busy_timeout = timeout_ms;
1561         }
1562
1563         err = mmc_wait_for_cmd(host, &cmd, 0);
1564         if (err)
1565                 return err;
1566
1567         /*
1568          * If the host does not wait while the card signals busy, then we
1569          * will have to wait for the sleep/awake timeout.  Note, we cannot use
1570          * the SEND_STATUS command to poll the status because that command (and
1571          * most others) is invalid while the card sleeps.
1572          */
1573         if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1574                 mmc_delay(timeout_ms);
1575
1576         return err;
1577 }
1578
1579 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1580 {
1581         return card &&
1582                 mmc_card_mmc(card) &&
1583                 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1584 }
1585
1586 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1587 {
1588         unsigned int timeout = card->ext_csd.generic_cmd6_time;
1589         int err;
1590
1591         /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1592         if (notify_type == EXT_CSD_POWER_OFF_LONG)
1593                 timeout = card->ext_csd.power_off_longtime;
1594
1595         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1596                         EXT_CSD_POWER_OFF_NOTIFICATION,
1597                         notify_type, timeout, true, false, false);
1598         if (err)
1599                 pr_err("%s: Power Off Notification timed out, %u\n",
1600                        mmc_hostname(card->host), timeout);
1601
1602         /* Disable the power off notification after the switch operation. */
1603         card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1604
1605         return err;
1606 }
1607
1608 /*
1609  * Host is being removed. Free up the current card.
1610  */
1611 static void mmc_remove(struct mmc_host *host)
1612 {
1613         BUG_ON(!host);
1614         BUG_ON(!host->card);
1615
1616         mmc_remove_card(host->card);
1617         host->card = NULL;
1618 }
1619
1620 /*
1621  * Card detection - card is alive.
1622  */
1623 static int mmc_alive(struct mmc_host *host)
1624 {
1625         return mmc_send_status(host->card, NULL);
1626 }
1627
1628 /*
1629  * Card detection callback from host.
1630  */
1631 static void mmc_detect(struct mmc_host *host)
1632 {
1633         int err;
1634
1635         BUG_ON(!host);
1636         BUG_ON(!host->card);
1637
1638         mmc_get_card(host->card);
1639
1640         /*
1641          * Just check if our card has been removed.
1642          */
1643         err = _mmc_detect_card_removed(host);
1644
1645         mmc_put_card(host->card);
1646
1647         if (err) {
1648                 mmc_remove(host);
1649
1650                 mmc_claim_host(host);
1651                 mmc_detach_bus(host);
1652                 mmc_power_off(host);
1653                 mmc_release_host(host);
1654         }
1655 }
1656
1657 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
1658 {
1659         int err = 0;
1660         unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
1661                                         EXT_CSD_POWER_OFF_LONG;
1662
1663         BUG_ON(!host);
1664         BUG_ON(!host->card);
1665
1666         mmc_claim_host(host);
1667
1668         if (mmc_card_suspended(host->card))
1669                 goto out;
1670
1671         if (mmc_card_doing_bkops(host->card)) {
1672                 err = mmc_stop_bkops(host->card);
1673                 if (err)
1674                         goto out;
1675         }
1676
1677         err = mmc_flush_cache(host->card);
1678         if (err)
1679                 goto out;
1680
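        /*
         * Prefer a power-off notification when the card supports it and the
         * host can do a full power cycle (or this is a shutdown); otherwise
         * put the card to sleep, or simply deselect it as a last resort.
         */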
1681         if (mmc_can_poweroff_notify(host->card) &&
1682                 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
1683                 err = mmc_poweroff_notify(host->card, notify_type);
1684         else if (mmc_can_sleep(host->card))
1685                 err = mmc_sleep(host);
1686         else if (!mmc_host_is_spi(host))
1687                 err = mmc_deselect_cards(host);
1688
1689         if (!err) {
1690                 mmc_power_off(host);
1691                 mmc_card_set_suspended(host->card);
1692         }
1693 out:
1694         mmc_release_host(host);
1695         return err;
1696 }
1697
1698 /*
1699  * Suspend callback
1700  */
1701 static int mmc_suspend(struct mmc_host *host)
1702 {
1703         int err;
1704
1705         err = _mmc_suspend(host, true);
1706         if (!err) {
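                /*
                 * Keep runtime PM disabled while the card is suspended so the
                 * runtime callbacks are not invoked on a powered-off card;
                 * mmc_resume() enables it again.
                 */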
1707                 pm_runtime_disable(&host->card->dev);
1708                 pm_runtime_set_suspended(&host->card->dev);
1709         }
1710
1711         return err;
1712 }
1713
1714 /*
1715  * This function tries to determine if the same card is still present
1716  * and, if so, restore all state to it.
1717  */
1718 static int _mmc_resume(struct mmc_host *host)
1719 {
1720         int err = 0;
1721
1722         BUG_ON(!host);
1723         BUG_ON(!host->card);
1724
1725         mmc_claim_host(host);
1726
1727         if (!mmc_card_suspended(host->card))
1728                 goto out;
1729
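        /*
         * Power the card back up with its stored OCR and reinitialise it,
         * passing the existing card so mmc_init_card() verifies the CID
         * instead of allocating a new card structure.
         */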
1730         mmc_power_up(host, host->card->ocr);
1731         err = mmc_init_card(host, host->card->ocr, host->card);
1732         mmc_card_clr_suspended(host->card);
1733
1734 out:
1735         mmc_release_host(host);
1736         return err;
1737 }
1738
1739 /*
1740  * Shutdown callback
1741  */
1742 static int mmc_shutdown(struct mmc_host *host)
1743 {
1744         int err = 0;
1745
1746         /*
1747          * In a specific case for poweroff notify, we need to resume the card
1748          * before we can shut it down properly.
1749          */
1750         if (mmc_can_poweroff_notify(host->card) &&
1751                 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
1752                 err = _mmc_resume(host);
1753
1754         if (!err)
1755                 err = _mmc_suspend(host, false);
1756
1757         return err;
1758 }
1759
1760 /*
1761  * Callback for resume.
1762  */
1763 static int mmc_resume(struct mmc_host *host)
1764 {
1765         int err = 0;
1766
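        /*
         * If the host can resume the card from its runtime PM callback
         * (MMC_CAP_RUNTIME_RESUME), leave it suspended here and let the
         * first runtime resume reinitialise it; otherwise resume it now.
         */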
1767         if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
1768                 err = _mmc_resume(host);
1769                 pm_runtime_set_active(&host->card->dev);
1770                 pm_runtime_mark_last_busy(&host->card->dev);
1771         }
1772         pm_runtime_enable(&host->card->dev);
1773
1774         return err;
1775 }
1776
1777 /*
1778  * Callback for runtime_suspend.
1779  */
1780 static int mmc_runtime_suspend(struct mmc_host *host)
1781 {
1782         int err;
1783
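        /*
         * Only hosts that opt in to aggressive power management have the
         * card powered off on runtime suspend.
         */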
1784         if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1785                 return 0;
1786
1787         err = _mmc_suspend(host, true);
1788         if (err)
1789                 pr_err("%s: error %d doing aggressive suspend\n",
1790                         mmc_hostname(host), err);
1791
1792         return err;
1793 }
1794
1795 /*
1796  * Callback for runtime_resume.
1797  */
1798 static int mmc_runtime_resume(struct mmc_host *host)
1799 {
1800         int err;
1801
1802         if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
1803                 return 0;
1804
1805         err = _mmc_resume(host);
1806         if (err)
1807                 pr_err("%s: error %d doing aggressive resume\n",
1808                         mmc_hostname(host), err);
1809
1810         return 0;
1811 }
1812
1813 static int mmc_power_restore(struct mmc_host *host)
1814 {
1815         int ret;
1816
1817         mmc_claim_host(host);
1818         ret = mmc_init_card(host, host->card->ocr, host->card);
1819         mmc_release_host(host);
1820
1821         return ret;
1822 }
1823
1824 static const struct mmc_bus_ops mmc_ops = {
1825         .remove = mmc_remove,
1826         .detect = mmc_detect,
1827         .suspend = mmc_suspend,
1828         .resume = mmc_resume,
1829         .runtime_suspend = mmc_runtime_suspend,
1830         .runtime_resume = mmc_runtime_resume,
1831         .power_restore = mmc_power_restore,
1832         .alive = mmc_alive,
1833         .shutdown = mmc_shutdown,
1834 };
1835
1836 /*
1837  * Starting point for MMC card init.
1838  */
1839 int mmc_attach_mmc(struct mmc_host *host)
1840 {
1841         int err;
1842         u32 ocr, rocr;
1843
1844         BUG_ON(!host);
1845         WARN_ON(!host->claimed);
1846
1847         /* Set correct bus mode for MMC before attempting attach */
1848         if (!mmc_host_is_spi(host))
1849                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1850
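        /* CMD1 with a zero OCR argument only queries the card's OCR. */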
1851         err = mmc_send_op_cond(host, 0, &ocr);
1852         if (err)
1853                 return err;
1854
1855         mmc_attach_bus(host, &mmc_ops);
1856         if (host->ocr_avail_mmc)
1857                 host->ocr_avail = host->ocr_avail_mmc;
1858
1859         /*
1860          * We need to get OCR a different way for SPI.
1861          */
1862         if (mmc_host_is_spi(host)) {
1863                 err = mmc_spi_read_ocr(host, 1, &ocr);
1864                 if (err)
1865                         goto err;
1866         }
1867
1868         rocr = mmc_select_voltage(host, ocr);
1869
1870         /*
1871          * Can we support the voltage of the card?
1872          */
1873         if (!rocr) {
1874                 err = -EINVAL;
1875                 goto err;
1876         }
1877
1878         /*
1879          * Detect and init the card.
1880          */
1881         err = mmc_init_card(host, rocr, NULL);
1882         if (err)
1883                 goto err;
1884
1885         mmc_release_host(host);
1886         err = mmc_add_card(host->card);
1887         mmc_claim_host(host);
1888         if (err)
1889                 goto remove_card;
1890
1891         return 0;
1892
1893 remove_card:
1894         mmc_release_host(host);
1895         mmc_remove_card(host->card);
1896         mmc_claim_host(host);
1897         host->card = NULL;
1898 err:
1899         mmc_detach_bus(host);
1900
1901         pr_err("%s: error %d whilst initialising MMC card\n",
1902                 mmc_hostname(host), err);
1903
1904         return err;
1905 }