drivers/staging/greybus/sdio.c
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "gpbridge.h"

struct gb_sdio_host {
        struct gb_connection    *connection;
        struct gpbridge_device  *gpbdev;
        struct mmc_host         *mmc;
        struct mmc_request      *mrq;
        struct mutex            lock;   /* lock for this host */
        size_t                  data_max;
        void                    *xfer_buffer;
        spinlock_t              xfer;   /* lock to cancel ongoing transfer */
        bool                    xfer_stop;
        struct workqueue_struct *mrq_workqueue;
        struct work_struct      mrqwork;
        u8                      queued_events;
        bool                    removed;
        bool                    card_present;
        bool                    read_only;
};

#define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
                                 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4       (GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2          (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
                                 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B         (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
                                 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

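/*
 * Single-block read/write opcodes must not be used for requests spanning
 * more than one block; gb_sdio_transfer() checks this before splitting a
 * request into multiple Greybus transfer operations.
 */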
static inline bool single_op(struct mmc_command *cmd)
{
        uint32_t opcode = cmd->opcode;

        return opcode == MMC_WRITE_BLOCK ||
               opcode == MMC_READ_SINGLE_BLOCK;
}

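/*
 * Translate the capability bits reported by the module into the matching
 * MMC_CAP_* / MMC_CAP2_* flags of the mmc core. Polling is always requested,
 * and a non-removable host is marked as having a card present from the start.
 */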
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
        u32 caps = 0;
        u32 caps2 = 0;

        caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
                ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
                ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
                ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
                ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
                ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
                ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
                ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
                ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
                ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
                ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
                ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
                ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
                ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
                ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
                ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
                ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

        caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
#ifdef MMC_HS400_SUPPORTED
                ((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
                ((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
#endif
                ((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

        host->mmc->caps = caps | MMC_CAP_NEEDS_POLL;
        host->mmc->caps2 = caps2;

        if (caps & MMC_CAP_NONREMOVABLE)
                host->card_present = true;
}

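/*
 * Translate the module-reported OCR voltage-range bits into the matching
 * MMC_VDD_* mask understood by the mmc core.
 */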
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
        return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
                ((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
                ((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
                ((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
                ((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
                ((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
                ((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
                ((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
                ((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
                ((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
                ((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
                ((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
                ((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
                ((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
                ((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
                ((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
                ((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
                );
}

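/*
 * Query the module for its capabilities and fill in the mmc_host limits:
 * capability flags, maximum block size and count, supported OCR mask and
 * clock frequency range. The maximum data size per transfer operation is
 * bounded by the Greybus payload size.
 */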
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
        struct gb_sdio_get_caps_response response;
        struct mmc_host *mmc = host->mmc;
        u16 data_max;
        u32 blksz;
        u32 ocr;
        u32 r;
        int ret;

        ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
                                NULL, 0, &response, sizeof(response));
        if (ret < 0)
                return ret;
        r = le32_to_cpu(response.caps);

        _gb_sdio_set_host_caps(host, r);

        /* get the max block size that could fit our payload */
        data_max = gb_operation_get_payload_size_max(host->connection);
        data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
                       data_max - sizeof(struct gb_sdio_transfer_response));

        blksz = min(le16_to_cpu(response.max_blk_size), data_max);
        blksz = max_t(u32, 512, blksz);

        mmc->max_blk_size = rounddown_pow_of_two(blksz);
        mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
        host->data_max = data_max;

        /* get ocr supported values */
        ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
        mmc->ocr_avail = ocr;
        mmc->ocr_avail_sdio = mmc->ocr_avail;
        mmc->ocr_avail_sd = mmc->ocr_avail;
        mmc->ocr_avail_mmc = mmc->ocr_avail;

        /* get frequency range values */
        mmc->f_min = le32_to_cpu(response.f_min);
        mmc->f_max = le32_to_cpu(response.f_max);

        return 0;
}

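/*
 * Remember an event that arrived while the host was flagged as removed
 * (before mmc_add_host() has completed or during teardown); a newly queued
 * insert event supersedes a pending remove event and vice versa.
 */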
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
        if (event & GB_SDIO_CARD_INSERTED)
                host->queued_events &= ~GB_SDIO_CARD_REMOVED;
        else if (event & GB_SDIO_CARD_REMOVED)
                host->queued_events &= ~GB_SDIO_CARD_INSERTED;

        host->queued_events |= event;
}

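/*
 * Apply card insert/remove and write-protect events to the host state and
 * notify the mmc core when the card presence actually changed. Insert and
 * remove events are ignored for non-removable hosts.
 */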
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
        u8 state_changed = 0;

        if (event & GB_SDIO_CARD_INSERTED) {
                if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
                        return 0;
                if (host->card_present)
                        return 0;
                host->card_present = true;
                state_changed = 1;
        }

        if (event & GB_SDIO_CARD_REMOVED) {
                if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
                        return 0;
                if (!(host->card_present))
                        return 0;
                host->card_present = false;
                state_changed = 1;
        }

        if (event & GB_SDIO_WP)
                host->read_only = true;

        if (state_changed) {
                dev_info(mmc_dev(host->mmc), "card %s now\n",
                         (host->card_present ? "inserted" : "removed"));
                mmc_detect_change(host->mmc, 0);
        }

        return 0;
}

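/*
 * Handler for unsolicited SDIO event operations sent by the module. Events
 * are queued while the host is flagged as removed and processed immediately
 * otherwise.
 */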
static int gb_sdio_request_handler(struct gb_operation *op)
{
        struct gb_sdio_host *host = gb_connection_get_data(op->connection);
        struct gb_message *request;
        struct gb_sdio_event_request *payload;
        u8 type = op->type;
        int ret = 0;
        u8 event;

        if (type != GB_SDIO_TYPE_EVENT) {
                dev_err(mmc_dev(host->mmc),
                        "unsupported unsolicited event: %u\n", type);
                return -EINVAL;
        }

        request = op->request;

        if (request->payload_size < sizeof(*payload)) {
                dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
                        request->payload_size, sizeof(*payload));
                return -EINVAL;
        }

        payload = request->payload;
        event = payload->event;

        if (host->removed)
                _gb_queue_event(host, event);
        else
                ret = _gb_sdio_process_events(host, event);

        return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
                           struct gb_sdio_set_ios_request *request)
{
        return gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS,
                                 request, sizeof(*request), NULL, 0);
}

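/*
 * Send one chunk of a write request: copy up to host->data_max bytes out of
 * the scatterlist into the preallocated transfer buffer, issue a single
 * Greybus transfer operation and sanity-check the size acknowledged by the
 * module.
 */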
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
                         size_t len, u16 nblocks, off_t skip)
{
        struct gb_sdio_transfer_request *request;
        struct gb_sdio_transfer_response response;
        struct scatterlist *sg = data->sg;
        unsigned int sg_len = data->sg_len;
        size_t copied;
        u16 send_blksz;
        u16 send_blocks;
        int ret;

        WARN_ON(len > host->data_max);

        request = host->xfer_buffer;
        request->data_flags = (data->flags >> 8);
        request->data_blocks = cpu_to_le16(nblocks);
        request->data_blksz = cpu_to_le16(data->blksz);

        copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

        if (copied != len)
                return -EINVAL;

        ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
                                request, len + sizeof(*request),
                                &response, sizeof(response));
        if (ret < 0)
                return ret;

        send_blocks = le16_to_cpu(response.data_blocks);
        send_blksz = le16_to_cpu(response.data_blksz);

        if (len != send_blksz * send_blocks) {
                dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
                        len, send_blksz * send_blocks);
                return -EINVAL;
        }

        return ret;
}

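/*
 * Receive one chunk of a read request: issue a single Greybus transfer
 * operation, verify the size reported by the module and copy the returned
 * data from the transfer buffer into the scatterlist.
 */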
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
                         size_t len, u16 nblocks, off_t skip)
{
        struct gb_sdio_transfer_request request;
        struct gb_sdio_transfer_response *response;
        struct scatterlist *sg = data->sg;
        unsigned int sg_len = data->sg_len;
        size_t copied;
        u16 recv_blksz;
        u16 recv_blocks;
        int ret;

        WARN_ON(len > host->data_max);

        request.data_flags = (data->flags >> 8);
        request.data_blocks = cpu_to_le16(nblocks);
        request.data_blksz = cpu_to_le16(data->blksz);

        response = host->xfer_buffer;

        ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
                                &request, sizeof(request), response, len +
                                sizeof(*response));
        if (ret < 0)
                return ret;

        recv_blocks = le16_to_cpu(response->data_blocks);
        recv_blksz = le16_to_cpu(response->data_blksz);

        if (len != recv_blksz * recv_blocks) {
                dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
                        recv_blksz * recv_blocks, len);
                return -EINVAL;
        }

        copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
                                      skip);
        if (copied != len)
                return -EINVAL;

        return 0;
}

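/*
 * Split an MMC data request into chunks that fit the Greybus payload limit
 * and send/receive them one at a time, honouring a pending stop-transmission
 * request between chunks.
 */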
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
        size_t left, len;
        off_t skip = 0;
        int ret = 0;
        u16 nblocks;

        if (single_op(data->mrq->cmd) && data->blocks > 1) {
                ret = -ETIMEDOUT;
                goto out;
        }

        left = data->blksz * data->blocks;

        while (left) {
                /* check if a stop transmission is pending */
                spin_lock(&host->xfer);
                if (host->xfer_stop) {
                        host->xfer_stop = false;
                        spin_unlock(&host->xfer);
                        ret = -EINTR;
                        goto out;
                }
                spin_unlock(&host->xfer);
                len = min(left, host->data_max);
                nblocks = len / data->blksz;
                len = nblocks * data->blksz;

                if (data->flags & MMC_DATA_READ) {
                        ret = _gb_sdio_recv(host, data, len, nblocks, skip);
                        if (ret < 0)
                                goto out;
                } else {
                        ret = _gb_sdio_send(host, data, len, nblocks, skip);
                        if (ret < 0)
                                goto out;
                }
                data->bytes_xfered += len;
                left -= len;
                skip += len;
        }

out:
        data->error = ret;
        return ret;
}

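/*
 * Translate an mmc_command into a Greybus command operation, send it
 * synchronously and copy the (short or long) response back into cmd->resp.
 */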
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
        struct gb_sdio_command_request request = {0};
        struct gb_sdio_command_response response;
        struct mmc_data *data = host->mrq->data;
        u8 cmd_flags;
        u8 cmd_type;
        int i;
        int ret;

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                cmd_flags = GB_SDIO_RSP_NONE;
                break;
        case MMC_RSP_R1:
                cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
                break;
        case MMC_RSP_R1B:
                cmd_flags = GB_SDIO_RSP_R1B;
                break;
        case MMC_RSP_R2:
                cmd_flags = GB_SDIO_RSP_R2;
                break;
        case MMC_RSP_R3:
                cmd_flags = GB_SDIO_RSP_R3_R4;
                break;
        default:
                dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
                        mmc_resp_type(cmd));
                ret = -EINVAL;
                goto out;
        }

        switch (mmc_cmd_type(cmd)) {
        case MMC_CMD_BC:
                cmd_type = GB_SDIO_CMD_BC;
                break;
        case MMC_CMD_BCR:
                cmd_type = GB_SDIO_CMD_BCR;
                break;
        case MMC_CMD_AC:
                cmd_type = GB_SDIO_CMD_AC;
                break;
        case MMC_CMD_ADTC:
                cmd_type = GB_SDIO_CMD_ADTC;
                break;
        default:
                dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
                        mmc_cmd_type(cmd));
                ret = -EINVAL;
                goto out;
        }

        request.cmd = cmd->opcode;
        request.cmd_flags = cmd_flags;
        request.cmd_type = cmd_type;
        request.cmd_arg = cpu_to_le32(cmd->arg);
        /* some controllers need to know the data details at command time */
        if (data) {
                request.data_blocks = cpu_to_le16(data->blocks);
                request.data_blksz = cpu_to_le16(data->blksz);
        }

        ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
                                &request, sizeof(request), &response,
                                sizeof(response));
        if (ret < 0)
                goto out;

        /* no response expected */
        if (cmd_flags & GB_SDIO_RSP_NONE)
                goto out;

        /* long response expected */
        if (cmd_flags & GB_SDIO_RSP_R2)
                for (i = 0; i < 4; i++)
                        cmd->resp[i] = le32_to_cpu(response.resp[i]);
        else
                cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
        cmd->error = ret;
        return ret;
}

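/*
 * Workqueue handler that runs a complete mmc_request (sbc, cmd, data and
 * stop) outside of the mmc core's request context and then signals
 * completion with mmc_request_done().
 */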
static void gb_sdio_mrq_work(struct work_struct *work)
{
        struct gb_sdio_host *host;
        struct mmc_request *mrq;
        int ret;

        host = container_of(work, struct gb_sdio_host, mrqwork);

        mutex_lock(&host->lock);
        mrq = host->mrq;
        if (!mrq) {
                mutex_unlock(&host->lock);
                dev_err(mmc_dev(host->mmc), "mmc request is NULL\n");
                return;
        }

        if (host->removed) {
                mrq->cmd->error = -ESHUTDOWN;
                goto done;
        }

        if (mrq->sbc) {
                ret = gb_sdio_command(host, mrq->sbc);
                if (ret < 0)
                        goto done;
        }

        ret = gb_sdio_command(host, mrq->cmd);
        if (ret < 0)
                goto done;

        if (mrq->data) {
                ret = gb_sdio_transfer(host, mrq->data);
                if (ret < 0)
                        goto done;
        }

        if (mrq->stop) {
                ret = gb_sdio_command(host, mrq->stop);
                if (ret < 0)
                        goto done;
        }

done:
        host->mrq = NULL;
        mutex_unlock(&host->lock);
        mmc_request_done(host->mmc, mrq);
}

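/*
 * .request callback of the mmc host: record the request and hand it off to
 * the workqueue, failing it early if the host was removed or no card is
 * present. A stop-transmission command also flags any ongoing transfer for
 * cancellation.
 */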
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct gb_sdio_host *host = mmc_priv(mmc);
        struct mmc_command *cmd = mrq->cmd;

        /* Check if it is a request to cancel an ongoing transfer */
        if (cmd->opcode == MMC_STOP_TRANSMISSION) {
                spin_lock(&host->xfer);
                host->xfer_stop = true;
                spin_unlock(&host->xfer);
        }

        mutex_lock(&host->lock);

        WARN_ON(host->mrq);
        host->mrq = mrq;

        if (host->removed) {
                mrq->cmd->error = -ESHUTDOWN;
                goto out;
        }
        if (!host->card_present) {
                mrq->cmd->error = -ENOMEDIUM;
                goto out;
        }

        queue_work(host->mrq_workqueue, &host->mrqwork);

        mutex_unlock(&host->lock);
        return;

out:
        host->mrq = NULL;
        mutex_unlock(&host->lock);
        mmc_request_done(mmc, mrq);
}

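/*
 * .set_ios callback: translate the requested clock, voltage, bus width,
 * timing and driver type into a Greybus set_ios request and, on success,
 * mirror the settings into mmc->ios.
 */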
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct gb_sdio_host *host = mmc_priv(mmc);
        struct gb_sdio_set_ios_request request;
        int ret;
        u8 power_mode;
        u8 bus_width;
        u8 timing;
        u8 signal_voltage;
        u8 drv_type;

        mutex_lock(&host->lock);
        request.clock = cpu_to_le32(ios->clock);
        request.vdd = cpu_to_le32(1 << ios->vdd);

        request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
                            GB_SDIO_BUSMODE_OPENDRAIN :
                            GB_SDIO_BUSMODE_PUSHPULL);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
        default:
                power_mode = GB_SDIO_POWER_OFF;
                break;
        case MMC_POWER_UP:
                power_mode = GB_SDIO_POWER_UP;
                break;
        case MMC_POWER_ON:
                power_mode = GB_SDIO_POWER_ON;
                break;
#ifdef MMC_POWER_UNDEFINED_SUPPORTED
        case MMC_POWER_UNDEFINED:
                power_mode = GB_SDIO_POWER_UNDEFINED;
                break;
#endif
        }
        request.power_mode = power_mode;

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_1:
                bus_width = GB_SDIO_BUS_WIDTH_1;
                break;
        case MMC_BUS_WIDTH_4:
        default:
                bus_width = GB_SDIO_BUS_WIDTH_4;
                break;
        case MMC_BUS_WIDTH_8:
                bus_width = GB_SDIO_BUS_WIDTH_8;
                break;
        }
        request.bus_width = bus_width;

        switch (ios->timing) {
        case MMC_TIMING_LEGACY:
        default:
                timing = GB_SDIO_TIMING_LEGACY;
                break;
        case MMC_TIMING_MMC_HS:
                timing = GB_SDIO_TIMING_MMC_HS;
                break;
        case MMC_TIMING_SD_HS:
                timing = GB_SDIO_TIMING_SD_HS;
                break;
        case MMC_TIMING_UHS_SDR12:
                timing = GB_SDIO_TIMING_UHS_SDR12;
                break;
        case MMC_TIMING_UHS_SDR25:
                timing = GB_SDIO_TIMING_UHS_SDR25;
                break;
        case MMC_TIMING_UHS_SDR50:
                timing = GB_SDIO_TIMING_UHS_SDR50;
                break;
        case MMC_TIMING_UHS_SDR104:
                timing = GB_SDIO_TIMING_UHS_SDR104;
                break;
        case MMC_TIMING_UHS_DDR50:
                timing = GB_SDIO_TIMING_UHS_DDR50;
                break;
#ifdef MMC_DDR52_DEFINED
        case MMC_TIMING_MMC_DDR52:
                timing = GB_SDIO_TIMING_MMC_DDR52;
                break;
#endif
        case MMC_TIMING_MMC_HS200:
                timing = GB_SDIO_TIMING_MMC_HS200;
                break;
#ifdef MMC_HS400_SUPPORTED
        case MMC_TIMING_MMC_HS400:
                timing = GB_SDIO_TIMING_MMC_HS400;
                break;
#endif
        }
        request.timing = timing;

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
                break;
        case MMC_SIGNAL_VOLTAGE_180:
        default:
                signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
                break;
        case MMC_SIGNAL_VOLTAGE_120:
                signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
                break;
        }
        request.signal_voltage = signal_voltage;

        switch (ios->drv_type) {
        case MMC_SET_DRIVER_TYPE_A:
                drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
                break;
        case MMC_SET_DRIVER_TYPE_C:
                drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
                break;
        case MMC_SET_DRIVER_TYPE_D:
                drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
                break;
        case MMC_SET_DRIVER_TYPE_B:
        default:
                drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
                break;
        }
        request.drv_type = drv_type;

        ret = gb_sdio_set_ios(host, &request);
        if (ret < 0)
                goto out;

        memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
        mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
        struct gb_sdio_host *host = mmc_priv(mmc);

        mutex_lock(&host->lock);
        if (host->removed) {
                mutex_unlock(&host->lock);
                return -ESHUTDOWN;
        }
        mutex_unlock(&host->lock);

        return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
        struct gb_sdio_host *host = mmc_priv(mmc);

        mutex_lock(&host->lock);
        if (host->removed) {
                mutex_unlock(&host->lock);
                return -ESHUTDOWN;
        }
        mutex_unlock(&host->lock);

        return host->card_present;
}

static const struct mmc_host_ops gb_sdio_ops = {
        .request        = gb_mmc_request,
        .set_ios        = gb_mmc_set_ios,
        .get_ro         = gb_mmc_get_ro,
        .get_cd         = gb_mmc_get_cd,
};

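/*
 * Probe: allocate the mmc host, create and enable the Greybus connection,
 * query the module capabilities, set up the transfer buffer and request
 * workqueue, register with the mmc core and finally replay any events that
 * were queued before the host was ready.
 */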
static int gb_sdio_probe(struct gpbridge_device *gpbdev,
                         const struct gpbridge_device_id *id)
{
        struct gb_connection *connection;
        struct mmc_host *mmc;
        struct gb_sdio_host *host;
        size_t max_buffer;
        int ret = 0;

        mmc = mmc_alloc_host(sizeof(*host), &gpbdev->dev);
        if (!mmc)
                return -ENOMEM;

        connection = gb_connection_create(gpbdev->bundle,
                                          le16_to_cpu(gpbdev->cport_desc->id),
                                          gb_sdio_request_handler);
        if (IS_ERR(connection)) {
                ret = PTR_ERR(connection);
                goto exit_mmc_free;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->removed = true;

        host->connection = connection;
        gb_connection_set_data(connection, host);
        host->gpbdev = gpbdev;
        gb_gpbridge_set_data(gpbdev, host);

        ret = gb_connection_enable_tx(connection);
        if (ret)
                goto exit_connection_destroy;

        ret = gb_gpbridge_get_version(connection);
        if (ret)
                goto exit_connection_disable;

        ret = gb_sdio_get_caps(host);
        if (ret < 0)
                goto exit_connection_disable;

        mmc->ops = &gb_sdio_ops;

        /* for now we just make a 1:1 map between max blocks and segments */
        mmc->max_segs = host->mmc->max_blk_count;
        mmc->max_seg_size = host->mmc->max_blk_size;

        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

        max_buffer = gb_operation_get_payload_size_max(host->connection);
        host->xfer_buffer = kzalloc(max_buffer, GFP_KERNEL);
        if (!host->xfer_buffer) {
                ret = -ENOMEM;
                goto exit_connection_disable;
        }
        mutex_init(&host->lock);
        spin_lock_init(&host->xfer);
        host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
                                              dev_name(&gpbdev->dev));
        if (!host->mrq_workqueue) {
                ret = -ENOMEM;
                goto exit_buf_free;
        }
        INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

        ret = gb_connection_enable(connection);
        if (ret)
                goto exit_wq_destroy;

        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto exit_wq_destroy;
        host->removed = false;
        ret = _gb_sdio_process_events(host, host->queued_events);
        host->queued_events = 0;

        return ret;

exit_wq_destroy:
        destroy_workqueue(host->mrq_workqueue);
exit_buf_free:
        kfree(host->xfer_buffer);
exit_connection_disable:
        gb_connection_disable(connection);
exit_connection_destroy:
        gb_connection_destroy(connection);
exit_mmc_free:
        mmc_free_host(mmc);

        return ret;
}

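/*
 * Remove: mark the host as removed so new requests fail, flush any work in
 * flight, then tear down the mmc host and the Greybus connection and free
 * the transfer buffer.
 */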
static void gb_sdio_remove(struct gpbridge_device *gpbdev)
{
        struct gb_sdio_host *host = gb_gpbridge_get_data(gpbdev);
        struct gb_connection *connection = host->connection;
        struct mmc_host *mmc;

        mutex_lock(&host->lock);
        host->removed = true;
        mmc = host->mmc;
        gb_connection_set_data(connection, NULL);
        mutex_unlock(&host->lock);

        flush_workqueue(host->mrq_workqueue);
        destroy_workqueue(host->mrq_workqueue);
        gb_connection_disable_rx(connection);
        mmc_remove_host(mmc);
        gb_connection_disable(connection);
        gb_connection_destroy(connection);
        kfree(host->xfer_buffer);
        mmc_free_host(mmc);
}

static const struct gpbridge_device_id gb_sdio_id_table[] = {
        { GPBRIDGE_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
        { },
};

static struct gpbridge_driver sdio_driver = {
        .name           = "sdio",
        .probe          = gb_sdio_probe,
        .remove         = gb_sdio_remove,
        .id_table       = gb_sdio_id_table,
};
gb_gpbridge_builtin_driver(sdio_driver);