2 * SD/MMC Greybus driver.
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
7 * Released under the GPLv2 only.
10 #include <linux/kernel.h>
11 #include <linux/mmc/core.h>
12 #include <linux/mmc/host.h>
13 #include <linux/mmc/mmc.h>
14 #include <linux/scatterlist.h>
15 #include <linux/workqueue.h>
21 struct gb_connection *connection;
22 struct gpbridge_device *gpbdev;
24 struct mmc_request *mrq;
25 struct mutex lock; /* lock for this host */
28 spinlock_t xfer; /* lock to cancel ongoing transfer */
30 struct workqueue_struct *mrq_workqueue;
31 struct work_struct mrqwork;
39 #define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
41 #define GB_SDIO_RSP_R3_R4 (GB_SDIO_RSP_PRESENT)
42 #define GB_SDIO_RSP_R2 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
44 #define GB_SDIO_RSP_R1B (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
45 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
/*
 * single_op() - true if @cmd uses a single-block data opcode.
 * Used by the transfer path to reject requests that pair a
 * single-block command with a multi-block data segment.
 */
47 static inline bool single_op(struct mmc_command *cmd)
49 	uint32_t opcode = cmd->opcode;
51 	return opcode == MMC_WRITE_BLOCK ||
52 		opcode == MMC_READ_SINGLE_BLOCK;
/*
 * _gb_sdio_set_host_caps() - translate the Greybus capability bitmask @r
 * into the mmc core's caps/caps2 masks on @host->mmc.
 *
 * MMC_CAP_NEEDS_POLL is unconditionally ORed into caps, and a
 * nonremovable host is marked card-present up front.
 */
55 static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
60 	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
61 		((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
62 		((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
63 		((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
64 		((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
65 		((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
66 		((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
67 		((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
68 		((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
69 		((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
70 		((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
71 		((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
72 		((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
73 		((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
74 		((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
75 		((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
76 		((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
78 	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
/* HS400 caps2 bits only exist on kernels that define them */
79 #ifdef MMC_HS400_SUPPORTED
80 		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
81 		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
83 		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
85 	host->mmc->caps = caps | MMC_CAP_NEEDS_POLL;
86 	host->mmc->caps2 = caps2;
/* a nonremovable card can never generate an insert event: present now */
88 	if (caps & MMC_CAP_NONREMOVABLE)
89 		host->card_present = true;
/*
 * _gb_sdio_get_host_ocr() - map Greybus OCR voltage-range bits in @ocr
 * to the mmc core's MMC_VDD_* mask.  Bit-for-bit translation only.
 */
92 static u32 _gb_sdio_get_host_ocr(u32 ocr)
94 	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
95 		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
96 		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
97 		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
98 		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
99 		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
100 		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
101 		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
102 		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
103 		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
104 		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
105 		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
106 		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
107 		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
108 		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
109 		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
110 		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
/*
 * gb_sdio_get_caps() - query the module's capabilities over Greybus and
 * program the mmc host limits from the response.
 *
 * Fills in caps/caps2, max block size/count, the usable per-operation
 * data payload (host->data_max), the OCR voltage mask and the clock
 * frequency range.  Returns 0 or a negative errno from the operation.
 */
114 static int gb_sdio_get_caps(struct gb_sdio_host *host)
116 	struct gb_sdio_get_caps_response response;
117 	struct mmc_host *mmc = host->mmc;
124 	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
125 				NULL, 0, &response, sizeof(response));
128 	r = le32_to_cpu(response.caps);
130 	_gb_sdio_set_host_caps(host, r);
132 	/* get the max block size that could fit our payload */
133 	data_max = gb_operation_get_payload_size_max(host->connection);
/* reserve room for the larger of the transfer request/response headers */
134 	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
135 		       data_max - sizeof(struct gb_sdio_transfer_response));
137 	blksz = min(le16_to_cpu(response.max_blk_size), data_max);
/* never advertise less than the standard 512-byte block */
138 	blksz = max_t(u32, 512, blksz);
/* mmc core expects a power-of-two max block size */
140 	mmc->max_blk_size = rounddown_pow_of_two(blksz);
141 	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
142 	host->data_max = data_max;
144 	/* get ocr supported values */
145 	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
146 	mmc->ocr_avail = ocr;
147 	mmc->ocr_avail_sdio = mmc->ocr_avail;
148 	mmc->ocr_avail_sd = mmc->ocr_avail;
149 	mmc->ocr_avail_mmc = mmc->ocr_avail;
151 	/* get frequency range values */
152 	mmc->f_min = le32_to_cpu(response.f_min);
153 	mmc->f_max = le32_to_cpu(response.f_max);
/*
 * _gb_queue_event() - accumulate @event into host->queued_events for
 * later processing (events are replayed after the host is registered).
 * An insert cancels a pending remove and vice versa, so only the most
 * recent card-presence state survives.
 */
158 static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
160 	if (event & GB_SDIO_CARD_INSERTED)
161 		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
162 	else if (event & GB_SDIO_CARD_REMOVED)
163 		host->queued_events &= ~GB_SDIO_CARD_INSERTED;
165 	host->queued_events |= event;
/*
 * _gb_sdio_process_events() - act on card events in @event.
 *
 * Insert/remove events update host->card_present (ignored for
 * nonremovable hosts or when the state is already current) and are
 * reported to the mmc core via mmc_detect_change().  A write-protect
 * event latches host->read_only.
 */
168 static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
170 	u8 state_changed = 0;
172 	if (event & GB_SDIO_CARD_INSERTED) {
173 		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
175 		if (host->card_present)
177 		host->card_present = true;
181 	if (event & GB_SDIO_CARD_REMOVED) {
182 		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
184 		if (!(host->card_present))
186 		host->card_present = false;
190 	if (event & GB_SDIO_WP) {
191 		host->read_only = true;
195 		dev_info(mmc_dev(host->mmc), "card %s now event\n",
196 			 (host->card_present ? "inserted" : "removed"));
197 		mmc_detect_change(host->mmc, 0);
/*
 * gb_sdio_request_handler() - handle unsolicited requests from the
 * module.  Only GB_SDIO_TYPE_EVENT is supported; the event payload is
 * size-checked, queued (in case the host is not registered yet) and
 * processed immediately.
 */
203 static int gb_sdio_request_handler(struct gb_operation *op)
205 	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
206 	struct gb_message *request;
207 	struct gb_sdio_event_request *payload;
212 	if (type != GB_SDIO_TYPE_EVENT) {
213 		dev_err(mmc_dev(host->mmc),
214 			"unsupported unsolicited event: %u\n", type);
218 	request = op->request;
/* reject short messages before touching the payload */
220 	if (request->payload_size < sizeof(*payload)) {
221 		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
222 			request->payload_size, sizeof(*payload));
226 	payload = request->payload;
227 	event = payload->event;
230 	_gb_queue_event(host, event);
232 	ret = _gb_sdio_process_events(host, event);
/*
 * gb_sdio_set_ios() - send a SET_IOS operation with @request to the
 * module.  Thin synchronous wrapper; returns the operation result.
 */
237 static int gb_sdio_set_ios(struct gb_sdio_host *host,
238 			   struct gb_sdio_set_ios_request *request)
240 	return gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS,
241 				 request, sizeof(*request), NULL, 0);
/*
 * _gb_sdio_send() - push one write chunk of @data to the module.
 * @len:     bytes in this chunk (caller guarantees <= host->data_max)
 * @nblocks: whole blocks contained in @len
 * @skip:    byte offset into the scatterlist already transferred
 *
 * Copies the chunk out of the scatterlist into the preallocated
 * host->xfer_buffer (request header + data), issues a TRANSFER
 * operation and cross-checks that the module acknowledged exactly
 * @len bytes (blocks * blksz).
 */
244 static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
245 			 size_t len, u16 nblocks, off_t skip)
247 	struct gb_sdio_transfer_request *request;
248 	struct gb_sdio_transfer_response response;
249 	struct scatterlist *sg = data->sg;
250 	unsigned int sg_len = data->sg_len;
256 	WARN_ON(len > host->data_max);
258 	request = host->xfer_buffer;
/* MMC_DATA_* flags live in bits 8+; shift down into the u8 wire field */
259 	request->data_flags = (data->flags >> 8);
260 	request->data_blocks = cpu_to_le16(nblocks);
261 	request->data_blksz = cpu_to_le16(data->blksz);
263 	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
268 	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
269 				request, len + sizeof(*request),
270 				&response, sizeof(response));
274 	send_blocks = le16_to_cpu(response.data_blocks);
275 	send_blksz = le16_to_cpu(response.data_blksz);
/* partial transfers are not supported: the echo must match exactly */
277 	if (len != send_blksz * send_blocks) {
278 		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
279 			len, send_blksz * send_blocks);
/*
 * _gb_sdio_recv() - pull one read chunk of @data from the module.
 * @len:     bytes expected in this chunk (caller guarantees <= host->data_max)
 * @nblocks: whole blocks contained in @len
 * @skip:    byte offset into the scatterlist already transferred
 *
 * Mirror of _gb_sdio_send(): sends a small TRANSFER request, receives
 * header + data into host->xfer_buffer, validates the echoed
 * blocks/blksz against @len and copies the data into the scatterlist.
 */
286 static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
287 			 size_t len, u16 nblocks, off_t skip)
289 	struct gb_sdio_transfer_request request;
290 	struct gb_sdio_transfer_response *response;
291 	struct scatterlist *sg = data->sg;
292 	unsigned int sg_len = data->sg_len;
298 	WARN_ON(len > host->data_max);
/* MMC_DATA_* flags live in bits 8+; shift down into the u8 wire field */
300 	request.data_flags = (data->flags >> 8);
301 	request.data_blocks = cpu_to_le16(nblocks);
302 	request.data_blksz = cpu_to_le16(data->blksz);
304 	response = host->xfer_buffer;
306 	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
307 				&request, sizeof(request), response, len +
312 	recv_blocks = le16_to_cpu(response->data_blocks);
313 	recv_blksz = le16_to_cpu(response->data_blksz);
/* partial transfers are not supported: the echo must match exactly */
315 	if (len != recv_blksz * recv_blocks) {
316 		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
317 			recv_blksz * recv_blocks, len);
321 	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
/*
 * gb_sdio_transfer() - move the whole of @data to/from the module,
 * splitting it into chunks no larger than host->data_max.
 *
 * Each chunk holds a whole number of blocks.  Before each chunk the
 * xfer spinlock is taken to honor a pending MMC_STOP_TRANSMISSION
 * requested from gb_mmc_request().  bytes_xfered accumulates per chunk.
 * Rejects multi-block requests issued with a single-block opcode.
 */
329 static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
336 	if (single_op(data->mrq->cmd) && data->blocks > 1) {
341 	left = data->blksz * data->blocks;
344 		/* check if a stop transmission is pending */
345 		spin_lock(&host->xfer);
346 		if (host->xfer_stop) {
347 			host->xfer_stop = false;
348 			spin_unlock(&host->xfer);
352 		spin_unlock(&host->xfer);
/* round the chunk down to a whole number of blocks */
353 		len = min(left, host->data_max);
354 		nblocks = len / data->blksz;
355 		len = nblocks * data->blksz;
357 		if (data->flags & MMC_DATA_READ) {
358 			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
362 			ret = _gb_sdio_send(host, data, len, nblocks, skip);
366 		data->bytes_xfered += len;
/*
 * gb_sdio_command() - send a single mmc command over Greybus.
 *
 * Translates the mmc response type and command type into the Greybus
 * cmd_flags/cmd_type encodings, forwards opcode/arg (plus data geometry
 * when a data segment is attached, for controllers that need it at
 * command time), and copies back the response words: four for long
 * (R2) responses, one otherwise.
 */
376 static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
378 	struct gb_sdio_command_request request = {0};
379 	struct gb_sdio_command_response response;
380 	struct mmc_data *data = host->mrq->data;
386 	switch (mmc_resp_type(cmd)) {
388 		cmd_flags = GB_SDIO_RSP_NONE;
391 		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
394 		cmd_flags = GB_SDIO_RSP_R1B;
397 		cmd_flags = GB_SDIO_RSP_R2;
400 		cmd_flags = GB_SDIO_RSP_R3_R4;
403 		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
409 	switch (mmc_cmd_type(cmd)) {
411 		cmd_type = GB_SDIO_CMD_BC;
414 		cmd_type = GB_SDIO_CMD_BCR;
417 		cmd_type = GB_SDIO_CMD_AC;
420 		cmd_type = GB_SDIO_CMD_ADTC;
423 		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
429 	request.cmd = cmd->opcode;
430 	request.cmd_flags = cmd_flags;
431 	request.cmd_type = cmd_type;
432 	request.cmd_arg = cpu_to_le32(cmd->arg);
433 	/* some controllers need to know at command time data details */
435 		request.data_blocks = cpu_to_le16(data->blocks);
436 		request.data_blksz = cpu_to_le16(data->blksz);
439 	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
440 				&request, sizeof(request), &response,
445 	/* no response expected */
446 	if (cmd_flags & GB_SDIO_RSP_NONE)
449 	/* long response expected */
450 	if (cmd_flags & GB_SDIO_RSP_R2)
451 		for (i = 0; i < 4; i++)
452 			cmd->resp[i] = le32_to_cpu(response.resp[i]);
454 		cmd->resp[0] = le32_to_cpu(response.resp[0]);
/*
 * gb_sdio_mrq_work() - workqueue handler that runs the mmc request
 * queued by gb_mmc_request().
 *
 * Under host->lock it executes, in order: sbc (set block count), the
 * main command, the data transfer, and the stop command, then reports
 * completion with mmc_request_done().  Bails out with -ESHUTDOWN when
 * the host has been removed.
 */
461 static void gb_sdio_mrq_work(struct work_struct *work)
463 	struct gb_sdio_host *host;
464 	struct mmc_request *mrq;
467 	host = container_of(work, struct gb_sdio_host, mrqwork);
469 	mutex_lock(&host->lock);
472 		mutex_unlock(&host->lock);
473 		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
478 		mrq->cmd->error = -ESHUTDOWN;
483 		ret = gb_sdio_command(host, mrq->sbc);
488 	ret = gb_sdio_command(host, mrq->cmd);
493 		ret = gb_sdio_transfer(host, mrq->data);
499 		ret = gb_sdio_command(host, mrq->stop);
506 	mutex_unlock(&host->lock);
507 	mmc_request_done(host->mmc, mrq);
/*
 * gb_mmc_request() - mmc_host_ops.request entry point.
 *
 * MMC_STOP_TRANSMISSION is flagged via host->xfer_stop under the xfer
 * spinlock so an in-flight gb_sdio_transfer() loop can abort.  The
 * request itself is handed to the mrq workqueue; a removed host or a
 * missing card fails the request immediately via mmc_request_done().
 */
510 static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
512 	struct gb_sdio_host *host = mmc_priv(mmc);
513 	struct mmc_command *cmd = mrq->cmd;
515 	/* Check if it is a cancel to ongoing transfer */
516 	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
517 		spin_lock(&host->xfer);
518 		host->xfer_stop = true;
519 		spin_unlock(&host->xfer);
522 	mutex_lock(&host->lock);
528 		mrq->cmd->error = -ESHUTDOWN;
531 	if (!host->card_present) {
532 		mrq->cmd->error = -ENOMEDIUM;
/* actual command processing happens in gb_sdio_mrq_work() */
536 	queue_work(host->mrq_workqueue, &host->mrqwork);
538 	mutex_unlock(&host->lock);
543 	mutex_unlock(&host->lock);
544 	mmc_request_done(mmc, mrq);
/*
 * gb_mmc_set_ios() - mmc_host_ops.set_ios entry point.
 *
 * Translates every field of @ios (clock, vdd, bus mode, power mode,
 * bus width, timing, signal voltage, driver type) into the Greybus
 * set_ios request encoding and sends it to the module; on success the
 * new settings are mirrored into mmc->ios.  Serialized by host->lock.
 */
547 static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
549 	struct gb_sdio_host *host = mmc_priv(mmc);
550 	struct gb_sdio_set_ios_request request;
558 	mutex_lock(&host->lock);
559 	request.clock = cpu_to_le32(ios->clock);
/* ios->vdd is a bit number; the wire format wants the mask */
560 	request.vdd = cpu_to_le32(1 << ios->vdd);
562 	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
563 			    GB_SDIO_BUSMODE_OPENDRAIN :
564 			    GB_SDIO_BUSMODE_PUSHPULL);
566 	switch (ios->power_mode) {
569 		power_mode = GB_SDIO_POWER_OFF;
572 		power_mode = GB_SDIO_POWER_UP;
575 		power_mode = GB_SDIO_POWER_ON;
/* only newer kernels define MMC_POWER_UNDEFINED */
577 #ifdef MMC_POWER_UNDEFINED_SUPPORTED
578 	case MMC_POWER_UNDEFINED:
579 		power_mode = GB_SDIO_POWER_UNDEFINED;
583 	request.power_mode = power_mode;
585 	switch (ios->bus_width) {
586 	case MMC_BUS_WIDTH_1:
587 		bus_width = GB_SDIO_BUS_WIDTH_1;
589 	case MMC_BUS_WIDTH_4:
591 		bus_width = GB_SDIO_BUS_WIDTH_4;
593 	case MMC_BUS_WIDTH_8:
594 		bus_width = GB_SDIO_BUS_WIDTH_8;
597 	request.bus_width = bus_width;
599 	switch (ios->timing) {
600 	case MMC_TIMING_LEGACY:
602 		timing = GB_SDIO_TIMING_LEGACY;
604 	case MMC_TIMING_MMC_HS:
605 		timing = GB_SDIO_TIMING_MMC_HS;
607 	case MMC_TIMING_SD_HS:
608 		timing = GB_SDIO_TIMING_SD_HS;
610 	case MMC_TIMING_UHS_SDR12:
611 		timing = GB_SDIO_TIMING_UHS_SDR12;
613 	case MMC_TIMING_UHS_SDR25:
614 		timing = GB_SDIO_TIMING_UHS_SDR25;
616 	case MMC_TIMING_UHS_SDR50:
617 		timing = GB_SDIO_TIMING_UHS_SDR50;
619 	case MMC_TIMING_UHS_SDR104:
620 		timing = GB_SDIO_TIMING_UHS_SDR104;
622 	case MMC_TIMING_UHS_DDR50:
623 		timing = GB_SDIO_TIMING_UHS_DDR50;
/* DDR52/HS400 timings exist only on kernels that define them */
625 #ifdef MMC_DDR52_DEFINED
626 	case MMC_TIMING_MMC_DDR52:
627 		timing = GB_SDIO_TIMING_MMC_DDR52;
630 	case MMC_TIMING_MMC_HS200:
631 		timing = GB_SDIO_TIMING_MMC_HS200;
633 #ifdef MMC_HS400_SUPPORTED
634 	case MMC_TIMING_MMC_HS400:
635 		timing = GB_SDIO_TIMING_MMC_HS400;
639 	request.timing = timing;
641 	switch (ios->signal_voltage) {
642 	case MMC_SIGNAL_VOLTAGE_330:
643 		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
645 	case MMC_SIGNAL_VOLTAGE_180:
647 		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
649 	case MMC_SIGNAL_VOLTAGE_120:
650 		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
653 	request.signal_voltage = signal_voltage;
655 	switch (ios->drv_type) {
656 	case MMC_SET_DRIVER_TYPE_A:
657 		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
659 	case MMC_SET_DRIVER_TYPE_C:
660 		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
662 	case MMC_SET_DRIVER_TYPE_D:
663 		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
665 	case MMC_SET_DRIVER_TYPE_B:
667 		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
670 	request.drv_type = drv_type;
672 	ret = gb_sdio_set_ios(host, &request);
/* keep the mmc core's view in sync with what the module accepted */
676 	memcpy(&mmc->ios, ios, sizeof(mmc->ios));
679 	mutex_unlock(&host->lock);
/*
 * gb_mmc_get_ro() - mmc_host_ops.get_ro: report the cached
 * write-protect state latched by the GB_SDIO_WP event.
 * host->lock serializes against the request/removal paths.
 */
682 static int gb_mmc_get_ro(struct mmc_host *mmc)
684 	struct gb_sdio_host *host = mmc_priv(mmc);
686 	mutex_lock(&host->lock);
689 	mutex_unlock(&host->lock);
690 	return host->read_only;
/*
 * gb_mmc_get_cd() - mmc_host_ops.get_cd: report the cached card-present
 * state maintained by the insert/remove event handlers.
 * host->lock serializes against the request/removal paths.
 */
693 static int gb_mmc_get_cd(struct mmc_host *mmc)
695 	struct gb_sdio_host *host = mmc_priv(mmc);
697 	mutex_lock(&host->lock);
700 	mutex_unlock(&host->lock);
701 	return host->card_present;
/* mmc core callbacks implemented by this driver */
704 static const struct mmc_host_ops gb_sdio_ops = {
705 	.request	= gb_mmc_request,
706 	.set_ios	= gb_mmc_set_ios,
707 	.get_ro		= gb_mmc_get_ro,
708 	.get_cd		= gb_mmc_get_cd,
/*
 * gb_sdio_probe() - bring up one Greybus SDIO host.
 *
 * Allocates the mmc host, creates and enables the Greybus connection
 * (tx-first so caps can be fetched before unsolicited events flow),
 * programs the transfer limits, allocates the bounce buffer and the
 * single-threaded request workqueue, registers the mmc host, and
 * finally replays any card events queued while host->removed was set.
 * Error paths unwind in reverse order via the goto labels.
 */
711 static int gb_sdio_probe(struct gpbridge_device *gpbdev,
712 			 const struct gpbridge_device_id *id)
714 	struct gb_connection *connection;
715 	struct mmc_host *mmc;
716 	struct gb_sdio_host *host;
720 	mmc = mmc_alloc_host(sizeof(*host), &gpbdev->dev);
724 	connection = gb_connection_create(gpbdev->bundle,
725 					  le16_to_cpu(gpbdev->cport_desc->id),
726 					  gb_sdio_request_handler);
727 	if (IS_ERR(connection)) {
728 		ret = PTR_ERR(connection);
732 	host = mmc_priv(mmc);
/* treat events as "queued only" until mmc_add_host() succeeds */
734 	host->removed = true;
736 	host->connection = connection;
737 	gb_connection_set_data(connection, host);
738 	host->gpbdev = gpbdev;
739 	gb_gpbridge_set_data(gpbdev, host);
741 	ret = gb_connection_enable_tx(connection);
743 		goto exit_connection_destroy;
745 	ret = gb_gpbridge_get_version(connection);
747 		goto exit_connection_disable;
749 	ret = gb_sdio_get_caps(host);
751 		goto exit_connection_disable;
753 	mmc->ops = &gb_sdio_ops;
755 	/* for now we just make a map 1:1 between max blocks and segments */
756 	mmc->max_segs = host->mmc->max_blk_count;
757 	mmc->max_seg_size = host->mmc->max_blk_size;
759 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
/* bounce buffer shared by send/recv; sized to the max greybus payload */
761 	max_buffer = gb_operation_get_payload_size_max(host->connection);
762 	host->xfer_buffer = kzalloc(max_buffer, GFP_KERNEL);
763 	if (!host->xfer_buffer) {
765 		goto exit_connection_disable;
767 	mutex_init(&host->lock);
768 	spin_lock_init(&host->xfer);
/* single-threaded: mmc requests must be processed strictly in order */
769 	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
770 					      dev_name(&gpbdev->dev));
771 	if (!host->mrq_workqueue) {
775 	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
777 	ret = gb_connection_enable(connection);
779 		goto exit_wq_destroy;
781 	ret = mmc_add_host(mmc);
783 		goto exit_wq_destroy;
784 	host->removed = false;
/* replay card events that arrived before the host was registered */
785 	ret = _gb_sdio_process_events(host, host->queued_events);
786 	host->queued_events = 0;
791 	destroy_workqueue(host->mrq_workqueue);
793 	kfree(host->xfer_buffer);
794 exit_connection_disable:
795 	gb_connection_disable(connection);
796 exit_connection_destroy:
797 	gb_connection_destroy(connection);
/*
 * gb_sdio_remove() - tear down one Greybus SDIO host.
 *
 * Marks the host removed under host->lock (so new requests fail with
 * -ESHUTDOWN), drains and destroys the workqueue, stops incoming
 * traffic before unregistering the mmc host, then disables and
 * destroys the connection and frees the bounce buffer.
 *
 * NOTE(review): in the lines visible here @mmc is never assigned
 * (expected "mmc = host->mmc;") before mmc_remove_host(mmc) — verify
 * the assignment exists, otherwise this uses an uninitialized pointer.
 */
804 static void gb_sdio_remove(struct gpbridge_device *gpbdev)
806 	struct gb_sdio_host *host = gb_gpbridge_get_data(gpbdev);
807 	struct gb_connection *connection = host->connection;
808 	struct mmc_host *mmc;
810 	mutex_lock(&host->lock);
811 	host->removed = true;
813 	gb_connection_set_data(connection, NULL);
814 	mutex_unlock(&host->lock);
816 	flush_workqueue(host->mrq_workqueue);
817 	destroy_workqueue(host->mrq_workqueue);
818 	gb_connection_disable_rx(connection);
819 	mmc_remove_host(mmc);
820 	gb_connection_disable(connection);
821 	gb_connection_destroy(connection);
822 	kfree(host->xfer_buffer);
/* match any bundle exposing the Greybus SDIO protocol */
826 static const struct gpbridge_device_id gb_sdio_id_table[] = {
827 	{ GPBRIDGE_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
/* gpbridge driver glue: registered as a built-in driver */
831 static struct gpbridge_driver sdio_driver = {
833 	.probe = gb_sdio_probe,
834 	.remove = gb_sdio_remove,
835 	.id_table = gb_sdio_id_table,
837 gb_gpbridge_builtin_driver(sdio_driver);