if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
- blk_mq_insert_request(rq, false, true, true);
+ blk_mq_insert_request(rq, false, true, false);
return 0;
}
rq = NULL;
break;
- } else if (ret == BLKPREP_KILL) {
+ } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+ int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
rq->cmd_flags |= REQ_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
*/
blk_start_request(rq);
- __blk_end_request_all(rq, -EIO);
+ __blk_end_request_all(rq, err);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
+
+ /**
+ * blk_set_runtime_active - Force runtime status of the queue to be active
+ * @q: the queue of the device
+ *
+ * If the device is left runtime suspended during system suspend, the resume
+ * hook typically resumes the device and corrects its runtime status
+ * accordingly. However, that does not affect the queue runtime PM status,
+ * which is still "suspended". This prevents requests from being taken off
+ * the queue for processing.
+ *
+ * This function can be used in a driver's resume hook to correct the queue
+ * runtime PM status and re-enable peeking requests from the queue. It
+ * should be called before the first request is added to the queue.
+ */
+ void blk_set_runtime_active(struct request_queue *q)
+ {
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ spin_unlock_irq(q->queue_lock);
+ }
+ EXPORT_SYMBOL(blk_set_runtime_active);
#endif
int __init blk_dev_init(void)
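For context, a minimal sketch of how a driver's system-resume hook might call the new helper. This is not part of the patch: the foo_dev structure, foo_resume() callback and drvdata layout are hypothetical; only blk_set_runtime_active() comes from the change above.

#include <linux/blkdev.h>
#include <linux/device.h>

/* Hypothetical driver private data; only the request queue matters here. */
struct foo_dev {
	struct request_queue *queue;
};

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/*
	 * The device was left runtime suspended over system suspend and has
	 * just been powered up by this resume hook, so mark the queue active
	 * again before the first request is queued.
	 */
	blk_set_runtime_active(foo->queue);

	return 0;
}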
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
+#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include "scsi_logging.h"
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
+static DEFINE_IDA(host_index_ida);
static void scsi_host_cls_release(struct device *dev)
if (error)
goto out_destroy_freelist;
+ /*
+ * Increase the usage count temporarily here so that calling
+ * scsi_autopm_put_host() will trigger runtime idle if there is
+ * nothing else preventing the device from being suspended.
+ */
+ pm_runtime_get_noresume(&shost->shost_gendev);
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
goto out_destroy_host;
scsi_proc_host_add(shost);
+ scsi_autopm_put_host(shost);
return error;
out_destroy_host:
kfree(shost->shost_data);
+ ida_simple_remove(&host_index_ida, shost->host_no);
+
if (parent)
put_device(parent);
kfree(shost);
{
struct Scsi_Host *shost;
gfp_t gfp_mask = GFP_KERNEL;
+ int index;
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- /*
- * subtract one because we increment first then return, but we need to
- * know what the next host number was before increment
- */
- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+ index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto fail_kfree;
+ shost->host_no = index;
+
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_kfree;
+ goto fail_index_remove;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
fail_kthread:
kthread_stop(shost->ehandler);
+ fail_index_remove:
+ ida_simple_remove(&host_index_ida, shost->host_no);
fail_kfree:
kfree(shost);
return NULL;
void scsi_exit_hosts(void)
{
class_unregister(&shost_class);
+ ida_destroy(&host_index_ida);
}
int scsi_is_host_device(const struct device *dev)
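The conversion above replaces a monotonically increasing atomic counter with an IDA so that host numbers released when a host goes away can be handed out again. A minimal sketch of the allocate/free pattern being relied on (the example_* names are illustrative):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_get_index(void)
{
	/* Returns the smallest free index >= 0, or a negative errno. */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void example_put_index(int index)
{
	/* Frees the index so a later ida_simple_get() can reuse it. */
	ida_simple_remove(&example_ida, index);
}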
/*
* q->prep_rq_fn return values
*/
-#define BLKPREP_OK 0 /* serve it */
-#define BLKPREP_KILL 1 /* fatal error, kill */
-#define BLKPREP_DEFER 2 /* leave on queue */
+enum {
+ BLKPREP_OK, /* serve it */
+ BLKPREP_KILL, /* fatal error, kill, return -EIO */
+ BLKPREP_DEFER, /* leave on queue */
+ BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
+};
extern unsigned long blk_max_low_pfn, blk_max_pfn;
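To illustrate the intended use of the new value: a prep_rq_fn returns BLKPREP_INVALID for a request the device can never execute (completed as -EREMOTEIO) and keeps BLKPREP_KILL for generic fatal errors (-EIO). The sketch below is hypothetical; foo_dev and its fields are made up, only the BLKPREP_* values and the prep_rq_fn signature are real.

#include <linux/blkdev.h>

struct foo_dev {
	bool supports_pc;	/* hypothetical capability flag */
	bool dead;		/* hypothetical "device is gone" flag */
};

static int foo_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	struct foo_dev *foo = q->queuedata;

	/* Command the device can never handle: fail with -EREMOTEIO. */
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !foo->supports_pc)
		return BLKPREP_INVALID;

	/* Generic fatal condition: fail with -EIO as before. */
	if (foo->dead)
		return BLKPREP_KILL;

	return BLKPREP_OK;
}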
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
+ extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+ static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
/*
page_cache_release(p.v);
}
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
{
if (!queue_virt_boundary(q))
return false;
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return __bvec_gap_to_prev(q, bprv, offset);
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
- if (!bio_has_data(prev))
- return false;
+ if (bio_has_data(prev) && queue_virt_boundary(q)) {
+ struct bio_vec pb, nb;
+
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
- return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ }
+
+ return false;
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
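These gap helpers only do anything when a queue has a virt boundary set; a driver whose hardware cannot tolerate a gap inside a scatter-gather element (PRP-style DMA, for instance) opts in with blk_queue_virt_boundary(). A sketch of such a setup, with the foo_ name being illustrative:

#include <linux/blkdev.h>

static void foo_init_queue_limits(struct request_queue *q)
{
	/*
	 * With this mask set, bio_will_gap()/bvec_gap_to_prev() above reject
	 * merges where a middle segment would not start and end on a
	 * page-size boundary, keeping every SG element virtually contiguous.
	 */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}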