Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c      |  17
-rw-r--r--  block/blk-cgroup.c         |   1
-rw-r--r--  block/blk-core.c           | 108
-rw-r--r--  block/blk-integrity.c      | 192
-rw-r--r--  block/blk-lib.c            |  31
-rw-r--r--  block/blk-merge.c          |  32
-rw-r--r--  block/blk-mq-cpumap.c      |   9
-rw-r--r--  block/blk-mq-sysfs.c       |  40
-rw-r--r--  block/blk-mq-tag.c         |  32
-rw-r--r--  block/blk-mq-tag.h         |   2
-rw-r--r--  block/blk-mq.c             | 291
-rw-r--r--  block/blk-mq.h             |   5
-rw-r--r--  block/blk-sysfs.c          |   4
-rw-r--r--  block/blk-throttle.c       |   2
-rw-r--r--  block/blk.h                |  23
-rw-r--r--  block/cfq-iosched.c        |   4
-rw-r--r--  block/elevator.c           |   2
-rw-r--r--  block/genhd.c              |   2
-rw-r--r--  block/ioctl.c              | 330
-rw-r--r--  block/partition-generic.c  |   1
-rw-r--r--  block/t10-pi.c             |  16
21 files changed, 726 insertions, 418 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 14b8faf8b09d..f6325d573c10 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -32,6 +32,11 @@
static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;
+void blk_flush_integrity(void)
+{
+ flush_workqueue(kintegrityd_wq);
+}
+
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
@@ -177,11 +182,11 @@ bool bio_integrity_enabled(struct bio *bio)
if (bi == NULL)
return false;
- if (bio_data_dir(bio) == READ && bi->verify_fn != NULL &&
+ if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
(bi->flags & BLK_INTEGRITY_VERIFY))
return true;
- if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL &&
+ if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
(bi->flags & BLK_INTEGRITY_GENERATE))
return true;
@@ -202,7 +207,7 @@ EXPORT_SYMBOL(bio_integrity_enabled);
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
- return sectors >> (ilog2(bi->interval) - 9);
+ return sectors >> (bi->interval_exp - 9);
}
static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
@@ -229,7 +234,7 @@ static int bio_integrity_process(struct bio *bio,
bip->bip_vec->bv_offset;
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
- iter.interval = bi->interval;
+ iter.interval = 1 << bi->interval_exp;
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
@@ -340,7 +345,7 @@ int bio_integrity_prep(struct bio *bio)
/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE)
- bio_integrity_process(bio, bi->generate_fn);
+ bio_integrity_process(bio, bi->profile->generate_fn);
return 0;
}
@@ -361,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
+ bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
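
Note on the interval change above: the protection interval is now stored as a power-of-two exponent (interval_exp) rather than a byte count, so the sector-to-interval conversion becomes a plain shift. A standalone sketch of the arithmetic (the 4096-byte interval is an assumed example value, not from the patch):

    #include <stdio.h>

    /* mirrors bio_integrity_intervals(): 512-byte sectors -> intervals */
    static unsigned int intervals(unsigned int interval_exp, unsigned int sectors)
    {
            return sectors >> (interval_exp - 9);
    }

    int main(void)
    {
            printf("%u\n", intervals(12, 8)); /* 4096-byte interval: 8 sectors -> 1 */
            printf("%u\n", intervals(9, 8));  /* 512-byte interval: 8 sectors -> 8 */
            return 0;
    }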
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 55512dd62633..5bcdfc10c23a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -899,6 +899,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
struct cftype blkcg_files[] = {
{
.name = "stat",
+ .flags = CFTYPE_NOT_ON_ROOT,
.seq_show = blkcg_print_stat,
},
{ } /* terminate */
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..89eec7965870 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,29 +554,30 @@ void blk_cleanup_queue(struct request_queue *q)
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- spin_lock_irq(lock);
- } else {
- spin_lock_irq(lock);
+ blk_freeze_queue(q);
+ spin_lock_irq(lock);
+ if (!q->mq_ops)
__blk_drain_queue(q, true);
- }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /* for synchronous bio-based drivers, finish in-flight integrity I/O */
+ blk_flush_integrity();
+
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
blk_mq_free_queue(q);
+ percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- bdi_destroy(&q->backing_dev_info);
+ bdi_unregister(&q->backing_dev_info);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
@@ -629,6 +630,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+ while (true) {
+ int ret;
+
+ if (percpu_ref_tryget_live(&q->q_usage_counter))
+ return 0;
+
+ if (!(gfp & __GFP_WAIT))
+ return -EBUSY;
+
+ ret = wait_event_interruptible(q->mq_freeze_wq,
+ !atomic_read(&q->mq_freeze_depth) ||
+ blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+ if (ret)
+ return ret;
+ }
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+ percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+ struct request_queue *q =
+ container_of(ref, struct request_queue, q_usage_counter);
+
+ wake_up_all(&q->mq_freeze_wq);
+}
+
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
@@ -690,11 +725,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
init_waitqueue_head(&q->mq_freeze_wq);
- if (blkcg_init_queue(q))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->q_usage_counter,
+ blk_queue_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
+ if (blkcg_init_queue(q))
+ goto fail_ref;
+
return q;
+fail_ref:
+ percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
bdi_destroy(&q->backing_dev_info);
fail_split:
@@ -1594,6 +1640,30 @@ out:
return ret;
}
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+ struct blk_plug *plug;
+ struct request *rq;
+ struct list_head *plug_list;
+ unsigned int ret = 0;
+
+ plug = current->plug;
+ if (!plug)
+ goto out;
+
+ if (q->mq_ops)
+ plug_list = &plug->mq_list;
+ else
+ plug_list = &plug->list;
+
+ list_for_each_entry(rq, plug_list, queuelist) {
+ if (rq->q == q)
+ ret++;
+ }
+out:
+ return ret;
+}
+
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1711,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (!blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return;
+ if (!blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
spin_lock_irq(q->queue_lock);
@@ -1966,9 +2038,19 @@ void generic_make_request(struct bio *bio)
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- q->make_request_fn(q, bio);
+ if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+ q->make_request_fn(q, bio);
+
+ blk_queue_exit(q);
- bio = bio_list_pop(current->bio_list);
+ bio = bio_list_pop(current->bio_list);
+ } else {
+ struct bio *bio_next = bio_list_pop(current->bio_list);
+
+ bio_io_error(bio);
+ bio = bio_next;
+ }
} while (bio);
current->bio_list = NULL; /* deactivate */
}
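
The new blk_queue_enter()/blk_queue_exit() pair brackets every bio submission against q_usage_counter, exactly as generic_make_request() now does above. A minimal sketch of the calling pattern (error handling reduced to the essentials):

    if (blk_queue_enter(q, __GFP_WAIT) == 0) {
            q->make_request_fn(q, bio); /* queue guaranteed live here */
            blk_queue_exit(q);          /* drop the percpu reference */
    } else {
            bio_io_error(bio);          /* queue dying, or !__GFP_WAIT and frozen */
    }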
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 75f29cf70188..d69c5c79f98e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -30,10 +30,6 @@
#include "blk.h"
-static struct kmem_cache *integrity_cachep;
-
-static const char *bi_unsupported_name = "unsupported";
-
/**
* blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
* @q: request queue
@@ -146,40 +142,40 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
*/
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
- struct blk_integrity *b1 = gd1->integrity;
- struct blk_integrity *b2 = gd2->integrity;
+ struct blk_integrity *b1 = &gd1->queue->integrity;
+ struct blk_integrity *b2 = &gd2->queue->integrity;
- if (!b1 && !b2)
+ if (!b1->profile && !b2->profile)
return 0;
- if (!b1 || !b2)
+ if (!b1->profile || !b2->profile)
return -1;
- if (b1->interval != b2->interval) {
+ if (b1->interval_exp != b2->interval_exp) {
pr_err("%s: %s/%s protection interval %u != %u\n",
__func__, gd1->disk_name, gd2->disk_name,
- b1->interval, b2->interval);
+ 1 << b1->interval_exp, 1 << b2->interval_exp);
return -1;
}
if (b1->tuple_size != b2->tuple_size) {
- printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+ pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tuple_size, b2->tuple_size);
return -1;
}
if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
- printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+ pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tag_size, b2->tag_size);
return -1;
}
- if (strcmp(b1->name, b2->name)) {
- printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+ if (b1->profile != b2->profile) {
+ pr_err("%s: %s/%s type %s != %s\n", __func__,
gd1->disk_name, gd2->disk_name,
- b1->name, b2->name);
+ b1->profile->name, b2->profile->name);
return -1;
}
@@ -249,8 +245,8 @@ struct integrity_sysfs_entry {
static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
char *page)
{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
+ struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+ struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
@@ -261,8 +257,8 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
struct attribute *attr, const char *page,
size_t count)
{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
+ struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+ struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
ssize_t ret = 0;
@@ -275,18 +271,21 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
- if (bi != NULL && bi->name != NULL)
- return sprintf(page, "%s\n", bi->name);
+ if (bi->profile && bi->profile->name)
+ return sprintf(page, "%s\n", bi->profile->name);
else
return sprintf(page, "none\n");
}
static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
{
- if (bi != NULL)
- return sprintf(page, "%u\n", bi->tag_size);
- else
- return sprintf(page, "0\n");
+ return sprintf(page, "%u\n", bi->tag_size);
+}
+
+static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
+{
+ return sprintf(page, "%u\n",
+ bi->interval_exp ? 1 << bi->interval_exp : 0);
}
static ssize_t integrity_verify_store(struct blk_integrity *bi,
@@ -343,6 +342,11 @@ static struct integrity_sysfs_entry integrity_tag_size_entry = {
.show = integrity_tag_size_show,
};
+static struct integrity_sysfs_entry integrity_interval_entry = {
+ .attr = { .name = "protection_interval_bytes", .mode = S_IRUGO },
+ .show = integrity_interval_show,
+};
+
static struct integrity_sysfs_entry integrity_verify_entry = {
.attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
.show = integrity_verify_show,
@@ -363,6 +367,7 @@ static struct integrity_sysfs_entry integrity_device_entry = {
static struct attribute *integrity_attrs[] = {
&integrity_format_entry.attr,
&integrity_tag_size_entry.attr,
+ &integrity_interval_entry.attr,
&integrity_verify_entry.attr,
&integrity_generate_entry.attr,
&integrity_device_entry.attr,
@@ -374,114 +379,89 @@ static const struct sysfs_ops integrity_ops = {
.store = &integrity_attr_store,
};
-static int __init blk_dev_integrity_init(void)
-{
- integrity_cachep = kmem_cache_create("blkdev_integrity",
- sizeof(struct blk_integrity),
- 0, SLAB_PANIC, NULL);
- return 0;
-}
-subsys_initcall(blk_dev_integrity_init);
-
-static void blk_integrity_release(struct kobject *kobj)
-{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
-
- kmem_cache_free(integrity_cachep, bi);
-}
-
static struct kobj_type integrity_ktype = {
.default_attrs = integrity_attrs,
.sysfs_ops = &integrity_ops,
- .release = blk_integrity_release,
};
-bool blk_integrity_is_initialized(struct gendisk *disk)
+static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
{
- struct blk_integrity *bi = blk_get_integrity(disk);
-
- return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+ return 0;
}
-EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+static struct blk_integrity_profile nop_profile = {
+ .name = "nop",
+ .generate_fn = blk_integrity_nop_fn,
+ .verify_fn = blk_integrity_nop_fn,
+};
/**
* blk_integrity_register - Register a gendisk as being integrity-capable
* @disk: struct gendisk pointer to make integrity-aware
- * @template: optional integrity profile to register
+ * @template: block integrity profile to register
*
- * Description: When a device needs to advertise itself as being able
- * to send/receive integrity metadata it must use this function to
- * register the capability with the block layer. The template is a
- * blk_integrity struct with values appropriate for the underlying
- * hardware. If template is NULL the new profile is allocated but
- * not filled out. See Documentation/block/data-integrity.txt.
+ * Description: When a device needs to advertise itself as being able to
+ * send/receive integrity metadata it must use this function to register
+ * the capability with the block layer. The template is a blk_integrity
+ * struct with values appropriate for the underlying hardware. See
+ * Documentation/block/data-integrity.txt.
*/
-int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
- struct blk_integrity *bi;
+ struct blk_integrity *bi = &disk->queue->integrity;
- BUG_ON(disk == NULL);
+ bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+ template->flags;
+ bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+ bi->profile = template->profile ? template->profile : &nop_profile;
+ bi->tuple_size = template->tuple_size;
+ bi->tag_size = template->tag_size;
- if (disk->integrity == NULL) {
- bi = kmem_cache_alloc(integrity_cachep,
- GFP_KERNEL | __GFP_ZERO);
- if (!bi)
- return -1;
-
- if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
- &disk_to_dev(disk)->kobj,
- "%s", "integrity")) {
- kmem_cache_free(integrity_cachep, bi);
- return -1;
- }
-
- kobject_uevent(&bi->kobj, KOBJ_ADD);
-
- bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
- bi->interval = queue_logical_block_size(disk->queue);
- disk->integrity = bi;
- } else
- bi = disk->integrity;
-
- /* Use the provided profile as template */
- if (template != NULL) {
- bi->name = template->name;
- bi->generate_fn = template->generate_fn;
- bi->verify_fn = template->verify_fn;
- bi->tuple_size = template->tuple_size;
- bi->tag_size = template->tag_size;
- bi->flags |= template->flags;
- } else
- bi->name = bi_unsupported_name;
-
- disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
-
- return 0;
+ blk_integrity_revalidate(disk);
}
EXPORT_SYMBOL(blk_integrity_register);
/**
- * blk_integrity_unregister - Remove block integrity profile
- * @disk: disk whose integrity profile to deallocate
+ * blk_integrity_unregister - Unregister block integrity profile
+ * @disk: disk whose integrity profile to unregister
*
- * Description: This function frees all memory used by the block
- * integrity profile. To be called at device teardown.
+ * Description: This function unregisters the integrity capability from
+ * a block device.
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- struct blk_integrity *bi;
+ blk_integrity_revalidate(disk);
+ memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
+
+void blk_integrity_revalidate(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
- if (!disk || !disk->integrity)
+ if (!(disk->flags & GENHD_FL_UP))
return;
- disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+ if (bi->profile)
+ disk->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+ else
+ disk->queue->backing_dev_info.capabilities &=
+ ~BDI_CAP_STABLE_WRITES;
+}
+
+void blk_integrity_add(struct gendisk *disk)
+{
+ if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "integrity"))
+ return;
- bi = disk->integrity;
+ kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+}
- kobject_uevent(&bi->kobj, KOBJ_REMOVE);
- kobject_del(&bi->kobj);
- kobject_put(&bi->kobj);
- disk->integrity = NULL;
+void blk_integrity_del(struct gendisk *disk)
+{
+ kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
+ kobject_del(&disk->integrity_kobj);
+ kobject_put(&disk->integrity_kobj);
}
-EXPORT_SYMBOL(blk_integrity_unregister);
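
With the rework above, a driver registers integrity by pointing a template at a shared profile; the queue's embedded struct blk_integrity is filled in from it, and the sysfs kobject lifetime is handled by blk_integrity_add()/blk_integrity_del(). A hedged driver-side sketch (my_register_integrity is a placeholder name; t10_pi_type1_crc comes from block/t10-pi.c below):

    #include <linux/blkdev.h>
    #include <linux/t10-pi.h>

    static void my_register_integrity(struct gendisk *disk)
    {
            struct blk_integrity bi = {
                    .profile    = &t10_pi_type1_crc,
                    .tuple_size = sizeof(struct t10_pi_tuple),
                    .tag_size   = 0,
            };

            blk_integrity_register(disk, &bi); /* returns void now; no error to check */
    }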
diff --git a/block/blk-lib.c b/block/blk-lib.c
index bd40292e5009..9ebf65379556 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio)
bio_put(bio);
}
-/*
- * Ensure that max discard sectors doesn't overflow bi_size and hopefully
- * it is of the proper granularity as long as the granularity is a power
- * of two.
- */
-#define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
@@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
@@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
@@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
@@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
break;
}
- req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+ /* Make sure bi_size doesn't overflow */
+ req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
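
The split logic above trims a chunk so the next discard starts on a granularity boundary offset by the device's discard alignment. Worked numbers as a standalone sketch (granularity, alignment, and the chunk size are assumed example values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sector = 0, end_sect;
            unsigned int granularity = 8;  /* 4 KiB, in 512-byte sectors */
            int alignment = 2;             /* discard alignment, in sectors */
            unsigned int req_sects = 8191; /* proposed chunk */

            end_sect = sector + req_sects; /* 8191 % 8 == 7 != 2: misaligned */
            end_sect = ((end_sect - alignment) / granularity) * granularity
                            + alignment;   /* rounds down to 8186 */
            req_sects = end_sect - sector;
            printf("trimmed to %u sectors, ending at %llu\n", req_sects, end_sect);
            return 0;
    }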
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c4e9c37f3e38..de5716d8e525 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,13 +11,16 @@
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
unsigned int max_discard_sectors, granularity;
int alignment;
sector_t tmp;
unsigned split_sectors;
+ *nsegs = 1;
+
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
static struct bio *blk_bio_write_same_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
+ *nsegs = 1;
+
if (!q->limits.max_write_same_sectors)
return NULL;
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *segs)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
@@ -106,24 +113,35 @@ new_segment:
sectors += bv.bv_len >> 9;
}
+ *segs = nsegs;
return NULL;
split:
+ *segs = nsegs;
return bio_split(bio, sectors, GFP_NOIO, bs);
}
void blk_queue_split(struct request_queue *q, struct bio **bio,
struct bio_set *bs)
{
- struct bio *split;
+ struct bio *split, *res;
+ unsigned nsegs;
if ((*bio)->bi_rw & REQ_DISCARD)
- split = blk_bio_discard_split(q, *bio, bs);
+ split = blk_bio_discard_split(q, *bio, bs, &nsegs);
else if ((*bio)->bi_rw & REQ_WRITE_SAME)
- split = blk_bio_write_same_split(q, *bio, bs);
+ split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
else
- split = blk_bio_segment_split(q, *bio, q->bio_split);
+ split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+ /* physical segments can be figured out during splitting */
+ res = split ? split : *bio;
+ res->bi_phys_segments = nsegs;
+ bio_set_flag(res, BIO_SEG_VALID);
if (split) {
+ /* there is no chance to merge the split bio */
+ split->bi_rw |= REQ_NOMERGE;
+
bio_chain(split, *bio);
generic_make_request(*bio);
*bio = split;
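
Because segment counting is now folded into the split path, a make_request-based driver only calls blk_queue_split() once at the top; the bio it gets back fits the queue limits, carries a valid bi_phys_segments, and is marked REQ_NOMERGE if it was split. The calling pattern, as a sketch (my_make_request is a placeholder name):

    static void my_make_request(struct request_queue *q, struct bio *bio)
    {
            blk_queue_split(q, &bio, q->bio_split);
            /* bio now fits the limits; BIO_SEG_VALID is already set */
            /* ... driver-specific submission ... */
    }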
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 1e28ddb656b8..8764c241e5bb 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+ const struct cpumask *online_mask)
{
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
cpumask_clear(cpus);
nr_cpus = nr_uniq_cpus = 0;
- for_each_online_cpu(i) {
+ for_each_cpu(i, online_mask) {
nr_cpus++;
first_sibling = get_first_sibling(i);
if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
queue = 0;
for_each_possible_cpu(i) {
- if (!cpu_online(i)) {
+ if (!cpumask_test_cpu(i, online_mask)) {
map[i] = 0;
continue;
}
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
if (!map)
return NULL;
- if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+ if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
return map;
kfree(map);
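
The map built here is indexed by possible CPU and yields a hardware-queue number; CPUs not present in online_mask fall back to queue 0. A hypothetical debug helper that reads it (dump_queue_map is not in the patch):

    static void dump_queue_map(unsigned int *map)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    pr_debug("cpu %d -> hw queue %u\n", cpu, map[cpu]);
    }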
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 279c5d674edf..6f57a110289c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
unsigned int i, first = 1;
ssize_t ret = 0;
- blk_mq_disable_hotplug();
-
for_each_cpu(i, hctx->cpumask) {
if (first)
ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
first = 0;
}
- blk_mq_enable_hotplug();
-
ret += sprintf(ret + page, "\n");
return ret;
}
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
struct blk_mq_ctx *ctx;
int i;
- if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ if (!hctx->nr_ctx)
return;
hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
struct blk_mq_ctx *ctx;
int i, ret;
- if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ if (!hctx->nr_ctx)
return 0;
ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
struct blk_mq_ctx *ctx;
int i, j;
+ blk_mq_disable_hotplug();
+
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_unregister_hctx(hctx);
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
+
+ q->mq_sysfs_init_done = false;
+ blk_mq_enable_hotplug();
}
static void blk_mq_sysfs_init(struct request_queue *q)
@@ -412,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
- percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
int blk_mq_register_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk);
@@ -425,27 +420,30 @@ int blk_mq_register_disk(struct gendisk *disk)
struct blk_mq_hw_ctx *hctx;
int ret, i;
+ blk_mq_disable_hotplug();
+
blk_mq_sysfs_init(q);
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
- return ret;
+ goto out;
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
queue_for_each_hw_ctx(q, hctx, i) {
- hctx->flags |= BLK_MQ_F_SYSFS_UP;
ret = blk_mq_register_hctx(hctx);
if (ret)
break;
}
- if (ret) {
+ if (ret)
blk_mq_unregister_disk(disk);
- return ret;
- }
+ else
+ q->mq_sysfs_init_done = true;
+out:
+ blk_mq_enable_hotplug();
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
@@ -454,6 +452,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
+ if (!q->mq_sysfs_init_done)
+ return;
+
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
}
@@ -463,6 +464,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i, ret = 0;
+ if (!q->mq_sysfs_init_done)
+ return ret;
+
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9115c6d59948..60ac684c8b8c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -75,6 +75,10 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
+ /*
+ * Make sure all changes prior to this are visible from other CPUs.
+ */
+ smp_mb();
bt = &tags->bitmap_tags;
wake_index = atomic_read(&bt->wake_index);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
@@ -471,17 +475,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
- struct blk_mq_tags *tags = hctx->tags;
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ struct blk_mq_tags *tags = hctx->tags;
+
+ /*
+ * If no software queues are currently mapped to this
+ * hardware queue, there's nothing to check
+ */
+ if (!blk_mq_hw_queue_mapped(hctx))
+ continue;
+
+ if (tags->nr_reserved_tags)
+ bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+ bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+ false);
+ }
- if (tags->nr_reserved_tags)
- bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
- bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
- false);
}
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
@@ -628,6 +645,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
{
bt_free(&tags->bitmap_tags);
bt_free(&tags->breserved_tags);
+ free_cpumask_var(tags->cpumask);
kfree(tags);
}
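
blk_mq_queue_tag_busy_iter() walks the tags of every mapped hardware queue and invokes the callback once per busy request; the timeout handler in blk-mq.c below is its first user. A hedged sketch of a counting callback (count_inflight and queue_inflight are hypothetical names; the busy_iter_fn signature matches blk_mq_check_expired):

    static void count_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
                               void *priv, bool reserved)
    {
            unsigned int *inflight = priv;

            (*inflight)++;
    }

    static unsigned int queue_inflight(struct request_queue *q)
    {
            unsigned int inflight = 0;

            blk_mq_queue_tag_busy_iter(q, count_inflight, &inflight);
            return inflight;
    }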
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 9eb2cf4f01cb..d468a79f2c4a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+ void *priv);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2d67b4047a0..1c27b3eaef64 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -9,6 +9,7 @@
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -77,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
- while (true) {
- int ret;
-
- if (percpu_ref_tryget_live(&q->mq_usage_counter))
- return 0;
-
- if (!(gfp & __GFP_WAIT))
- return -EBUSY;
-
- ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
- blk_queue_dying(q));
- if (blk_queue_dying(q))
- return -ENODEV;
- if (ret)
- return ret;
- }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
- percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
- struct request_queue *q =
- container_of(ref, struct request_queue, mq_usage_counter);
-
- wake_up_all(&q->mq_freeze_wq);
-}
-
void blk_mq_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
- percpu_ref_kill(&q->mq_usage_counter);
+ percpu_ref_kill(&q->q_usage_counter);
blk_mq_run_hw_queues(q, false);
}
}
@@ -125,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
/*
* Guarantee no request is in use, so we can change any data structure of
* the queue afterward.
*/
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
{
+ /*
+ * In the !blk_mq case we are only calling this to kill the
+ * q_usage_counter, otherwise this increases the freeze depth
+ * and waits for it to return to zero. For this reason there is
+ * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+ * exported to drivers as the only user for unfreeze is blk_mq.
+ */
blk_mq_freeze_queue_start(q);
blk_mq_freeze_queue_wait(q);
}
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+ /*
+ * ...just an alias to keep freeze and unfreeze actions balanced
+ * in the blk_mq_* namespace
+ */
+ blk_freeze_queue(q);
+}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -146,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
- percpu_ref_reinit(&q->mq_usage_counter);
+ percpu_ref_reinit(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
}
@@ -255,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_alloc_data alloc_data;
int ret;
- ret = blk_mq_queue_enter(q, gfp);
+ ret = blk_queue_enter(q, gfp);
if (ret)
return ERR_PTR(ret);
@@ -278,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
}
blk_mq_put_ctx(ctx);
if (!rq) {
- blk_mq_queue_exit(q);
+ blk_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
}
return rq;
@@ -297,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
blk_mq_put_tag(hctx, tag, &ctx->last_tag);
- blk_mq_queue_exit(q);
+ blk_queue_exit(q);
}
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -393,14 +376,16 @@ void __blk_mq_complete_request(struct request *rq)
* Ends all I/O on a request. It does not handle partial completions.
* The actual completion happens out-of-order, through a IPI handler.
**/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
{
struct request_queue *q = rq->q;
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq))
+ if (!blk_mark_rq_complete(rq)) {
+ rq->errors = error;
__blk_mq_complete_request(rq);
+ }
}
EXPORT_SYMBOL(blk_mq_complete_request);
@@ -616,10 +601,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q))) {
- rq->errors = -EIO;
- blk_mq_complete_request(rq);
- }
+ if (unlikely(blk_queue_dying(rq->q)))
+ blk_mq_complete_request(rq, -EIO);
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +624,16 @@ static void blk_mq_rq_timer(unsigned long priv)
.next = 0,
.next_set = 0,
};
- struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- /*
- * If not software queues are currently mapped to this
- * hardware queue, there's nothing to check
- */
- if (!blk_mq_hw_queue_mapped(hctx))
- continue;
-
- blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
- }
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
if (data.next_set) {
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
+ struct blk_mq_hw_ctx *hctx;
+
queue_for_each_hw_ctx(q, hctx, i) {
/* the hctx may be unmapped, so check it here */
if (blk_mq_hw_queue_mapped(hctx))
@@ -997,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_queue);
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx,
+ struct request *rq,
+ bool at_head)
{
- struct blk_mq_ctx *ctx = rq->mq_ctx;
-
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
list_add(&rq->queuelist, &ctx->rq_list);
else
list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, bool at_head)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
}
@@ -1064,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq, false);
+ __blk_mq_insert_req_list(hctx, ctx, rq, false);
}
+ blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1147,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio)
{
- if (!hctx_allow_merges(hctx)) {
+ if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock);
insert_rq:
@@ -1184,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
int rw = bio_data_dir(bio);
struct blk_mq_alloc_data alloc_data;
- if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
- bio_io_error(bio);
- return NULL;
- }
-
+ blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -1275,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
- return;
+ if (!is_flush_fua && !blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count,
+ &same_queue_rq))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@@ -1384,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
plug = current->plug;
if (plug) {
blk_mq_bio_to_request(rq, bio);
- if (list_empty(&plug->mq_list))
+ if (!request_count)
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
@@ -1438,6 +1420,11 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
while (!list_empty(&tags->page_list)) {
page = list_first_entry(&tags->page_list, struct page, lru);
list_del_init(&page->lru);
+ /*
+ * Remove kmemleak object previously allocated in
+ * blk_mq_init_rq_map().
+ */
+ kmemleak_free(page_address(page));
__free_pages(page, page->private);
}
@@ -1510,6 +1497,11 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
list_add_tail(&page->lru, &tags->page_list);
p = page_address(page);
+ /*
+ * Allow kmemleak to scan these pages as they contain pointers
+ * to additional allocations like via ops->init_request().
+ */
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
@@ -1681,7 +1673,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = hctx_idx;
- hctx->flags = set->flags;
+ hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
}
}
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+ const struct cpumask *online_mask)
{
unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
+ /*
+ * Avoid others reading incomplete hctx->cpumask through sysfs
+ */
+ mutex_lock(&q->sysfs_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*/
queue_for_each_ctx(q, ctx, i) {
/* If the cpu isn't online, the cpu is mapped to first hctx */
- if (!cpu_online(i))
+ if (!cpumask_test_cpu(i, online_mask))
continue;
hctx = q->mq_ops->map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
- cpumask_set_cpu(i, hctx->tags->cpumask);
ctx->index_hw = hctx->nr_ctx;
hctx->ctxs[hctx->nr_ctx++] = ctx;
}
+ mutex_unlock(&q->sysfs_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_ctxmap *map = &hctx->ctx_map;
@@ -1851,29 +1850,36 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx->next_cpu = cpumask_first(hctx->cpumask);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+
+ queue_for_each_ctx(q, ctx, i) {
+ if (!cpumask_test_cpu(i, online_mask))
+ continue;
+
+ hctx = q->mq_ops->map_queue(q, i);
+ cpumask_set_cpu(i, hctx->tags->cpumask);
+ }
}
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
struct blk_mq_hw_ctx *hctx;
- struct request_queue *q;
- bool shared;
int i;
- if (set->tag_list.next == set->tag_list.prev)
- shared = false;
- else
- shared = true;
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (shared)
+ hctx->flags |= BLK_MQ_F_TAG_SHARED;
+ else
+ hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+ }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+ struct request_queue *q;
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_freeze_queue(q);
-
- queue_for_each_hw_ctx(q, hctx, i) {
- if (shared)
- hctx->flags |= BLK_MQ_F_TAG_SHARED;
- else
- hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
- }
+ queue_set_hctx_shared(q, shared);
blk_mq_unfreeze_queue(q);
}
}
@@ -1884,7 +1890,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
mutex_lock(&set->tag_list_lock);
list_del_init(&q->tag_set_list);
- blk_mq_update_tag_set_depth(set);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, false);
+ }
mutex_unlock(&set->tag_list_lock);
}
@@ -1894,8 +1905,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
q->tag_set = set;
mutex_lock(&set->tag_list_lock);
+
+ /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+ if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+ set->flags |= BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, true);
+ }
+ if (set->flags & BLK_MQ_F_TAG_SHARED)
+ queue_set_hctx_shared(q, true);
list_add_tail(&q->tag_set_list, &set->tag_list);
- blk_mq_update_tag_set_depth(set);
+
mutex_unlock(&set->tag_list_lock);
}
@@ -1918,6 +1938,9 @@ void blk_mq_release(struct request_queue *q)
kfree(hctx);
}
+ kfree(q->mq_map);
+ q->mq_map = NULL;
+
kfree(q->queue_hw_ctx);
/* ctx kobj stays in queue_ctx */
@@ -1979,14 +2002,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
hctxs[i]->queue_num = i;
}
- /*
- * Init percpu_ref in atomic mode so that it's faster to shutdown.
- * See blk_register_queue() for details.
- */
- if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
- PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto err_hctxs;
-
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
@@ -2027,13 +2042,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (blk_mq_init_hw_queues(q, set))
goto err_hctxs;
+ get_online_cpus();
mutex_lock(&all_q_mutex);
- list_add_tail(&q->all_q_node, &all_q_list);
- mutex_unlock(&all_q_mutex);
+ list_add_tail(&q->all_q_node, &all_q_list);
blk_mq_add_queue_tag_set(set, q);
+ blk_mq_map_swqueue(q, cpu_online_mask);
- blk_mq_map_swqueue(q);
+ mutex_unlock(&all_q_mutex);
+ put_online_cpus();
return q;
@@ -2057,30 +2074,25 @@ void blk_mq_free_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
blk_mq_free_hw_queues(q, set);
-
- percpu_ref_exit(&q->mq_usage_counter);
-
- kfree(q->mq_map);
-
- q->mq_map = NULL;
-
- mutex_lock(&all_q_mutex);
- list_del_init(&q->all_q_node);
- mutex_unlock(&all_q_mutex);
}
/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+ const struct cpumask *online_mask)
{
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
blk_mq_sysfs_unregister(q);
- blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+ blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
/*
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2100,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
* involves free and re-allocate memory, worthy doing?)
*/
- blk_mq_map_swqueue(q);
+ blk_mq_map_swqueue(q, online_mask);
blk_mq_sysfs_register(q);
}
@@ -2097,16 +2109,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
unsigned long action, void *hcpu)
{
struct request_queue *q;
+ int cpu = (unsigned long)hcpu;
+ /*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumasks as global as cpu-hotplug operation is invoked
+ * one-by-one and dynamically allocating this could result in a failure.
+ */
+ static struct cpumask online_new;
/*
- * Before new mappings are established, hotadded cpu might already
- * start handling requests. This doesn't break anything as we map
- * offline CPUs to first hardware queue. We will re-init the queue
- * below to get optimal settings.
+ * Before hotadded cpu starts handling requests, new mappings must
+ * be established. Otherwise, these requests in hw queue might
+ * never be dispatched.
+ *
+ * For example, there is a single hw queue (hctx) and two CPU queues
+ * (ctx0 for CPU0, and ctx1 for CPU1).
+ *
+ * Now CPU1 is just onlined and a request is inserted into
+ * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+ * still zero.
+ *
+ * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+ * set in pending bitmap and tries to retrieve requests in
+ * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0,
+ * so the request in ctx1->rq_list is ignored.
*/
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
- action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ cpumask_copy(&online_new, cpu_online_mask);
+ break;
+ case CPU_UP_PREPARE:
+ cpumask_copy(&online_new, cpu_online_mask);
+ cpumask_set_cpu(cpu, &online_new);
+ break;
+ default:
return NOTIFY_OK;
+ }
mutex_lock(&all_q_mutex);
@@ -2130,7 +2169,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
}
list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q);
+ blk_mq_queue_reinit(q, &online_new);
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_unfreeze_queue(q);
@@ -2260,10 +2299,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
int i;
for (i = 0; i < set->nr_hw_queues; i++) {
- if (set->tags[i]) {
+ if (set->tags[i])
blk_mq_free_rq_map(set, set->tags[i], i);
- free_cpumask_var(set->tags[i]->cpumask);
- }
}
kfree(set->tags);
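
The blk_mq_complete_request() signature change above moves error assignment inside the blk_mark_rq_complete() guard, so only the completion attempt that wins the race writes rq->errors. Driver-side before/after, as a sketch:

    /* before: rq->errors written even if another completion already won */
    rq->errors = -EIO;
    blk_mq_complete_request(rq);

    /* after: the error travels with the completion attempt */
    blk_mq_complete_request(rq, -EIO);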
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a48c4c0d8a2..b44dce165761 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -29,8 +29,6 @@ void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
-void blk_mq_clone_flush_request(struct request *flush_rq,
- struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
@@ -51,7 +49,8 @@ void blk_mq_disable_hotplug(void);
* CPU -> queue mappings
*/
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+ const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
/*
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3e44a9da2a13..31849e328b45 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
+ bdi_exit(&q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
@@ -599,9 +600,8 @@ int blk_register_queue(struct gendisk *disk)
*/
if (!blk_queue_init_done(q)) {
queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ percpu_ref_switch_to_percpu(&q->q_usage_counter);
blk_queue_bypass_end(q);
- if (q->mq_ops)
- blk_mq_finish_init(q);
}
ret = blk_trace_init_sysfs(dev);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c75a2636dd40..2149a1ddbacf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -369,7 +369,7 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
* regardless of the position of the group in the hierarchy.
*/
sq->parent_sq = &td->service_queue;
- if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
+ if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
tg->td = td;
}
diff --git a/block/blk.h b/block/blk.h
index 98614ad37c81..da722eb786df 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,6 +72,28 @@ void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+ /*
+ * Given that running in generic_make_request() context
+ * guarantees that a live reference against q_usage_counter has
+ * been established, further references under that same context
+ * need not check that the queue has been frozen (marked dead).
+ */
+ percpu_ref_get(&q->q_usage_counter);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_flush_integrity(void);
+#else
+static inline void blk_flush_integrity(void)
+{
+}
+#endif
void blk_rq_timed_out_timer(unsigned long data);
unsigned long blk_rq_timeout(unsigned long timeout);
@@ -86,6 +108,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count,
struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 04de88463a98..1f9093e901da 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1581,7 +1581,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
static void cfq_cpd_init(struct blkcg_policy_data *cpd)
{
struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
- unsigned int weight = cgroup_on_dfl(blkcg_root.css.cgroup) ?
+ unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
if (cpd_to_blkcg(cpd) == &blkcg_root)
@@ -1599,7 +1599,7 @@ static void cfq_cpd_free(struct blkcg_policy_data *cpd)
static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
{
struct blkcg *blkcg = cpd_to_blkcg(cpd);
- bool on_dfl = cgroup_on_dfl(blkcg_root.css.cgroup);
+ bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
if (blkcg == &blkcg_root)
diff --git a/block/elevator.c b/block/elevator.c
index 84d63943f2de..c3555c9c672f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -420,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
- if (blk_queue_nomerges(q))
+ if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return ELEVATOR_NO_MERGE;
/*
diff --git a/block/genhd.c b/block/genhd.c
index 0c706f33a599..e5cafa51567c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -630,6 +630,7 @@ void add_disk(struct gendisk *disk)
WARN_ON(retval);
disk_add_events(disk);
+ blk_integrity_add(disk);
}
EXPORT_SYMBOL(add_disk);
@@ -638,6 +639,7 @@ void del_gendisk(struct gendisk *disk)
struct disk_part_iter piter;
struct hd_struct *part;
+ blk_integrity_del(disk);
disk_del_events(disk);
/* invalidate stuff */
diff --git a/block/ioctl.c b/block/ioctl.c
index 8061eba42887..0918aed2d847 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -7,6 +7,7 @@
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
+#include <linux/pr.h>
#include <asm/uaccess.h>
static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -193,10 +194,20 @@ int blkdev_reread_part(struct block_device *bdev)
}
EXPORT_SYMBOL(blkdev_reread_part);
-static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
- uint64_t len, int secure)
+static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+ unsigned long arg, unsigned long flags)
{
- unsigned long flags = 0;
+ uint64_t range[2];
+ uint64_t start, len;
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ start = range[0];
+ len = range[1];
if (start & 511)
return -EINVAL;
@@ -207,14 +218,24 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
if (start + len > (i_size_read(bdev->bd_inode) >> 9))
return -EINVAL;
- if (secure)
- flags |= BLKDEV_DISCARD_SECURE;
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
-static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
- uint64_t len)
+static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
+ unsigned long arg)
{
+ uint64_t range[2];
+ uint64_t start, len;
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ start = range[0];
+ len = range[1];
+
if (start & 511)
return -EINVAL;
if (len & 511)
@@ -275,6 +296,96 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
*/
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
+static int blkdev_pr_register(struct block_device *bdev,
+ struct pr_registration __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_registration reg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!ops || !ops->pr_register)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+
+ if (reg.flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+ return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
+}
+
+static int blkdev_pr_reserve(struct block_device *bdev,
+ struct pr_reservation __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_reservation rsv;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!ops || !ops->pr_reserve)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&rsv, arg, sizeof(rsv)))
+ return -EFAULT;
+
+ if (rsv.flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+ return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
+}
+
+static int blkdev_pr_release(struct block_device *bdev,
+ struct pr_reservation __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_reservation rsv;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!ops || !ops->pr_release)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&rsv, arg, sizeof(rsv)))
+ return -EFAULT;
+
+ if (rsv.flags)
+ return -EOPNOTSUPP;
+ return ops->pr_release(bdev, rsv.key, rsv.type);
+}
+
+static int blkdev_pr_preempt(struct block_device *bdev,
+ struct pr_preempt __user *arg, bool abort)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_preempt p;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!ops || !ops->pr_preempt)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&p, arg, sizeof(p)))
+ return -EFAULT;
+
+ if (p.flags)
+ return -EOPNOTSUPP;
+ return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
+}
+
+static int blkdev_pr_clear(struct block_device *bdev,
+ struct pr_clear __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_clear c;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!ops || !ops->pr_clear)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&c, arg, sizeof(c)))
+ return -EFAULT;
+
+ if (c.flags)
+ return -EOPNOTSUPP;
+ return ops->pr_clear(bdev, c.key);
+}
+
/*
* Is it an unrecognized ioctl? The correct returns are either
* ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
@@ -295,89 +406,115 @@ static inline int is_unrecognized_ioctl(int ret)
ret == -ENOIOCTLCMD;
}
-/*
- * always keep this in sync with compat_blkdev_ioctl()
- */
-int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
- unsigned long arg)
+static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct backing_dev_info *bdi;
- loff_t size;
- int ret, n;
- unsigned int max_sectors;
+ int ret;
- switch(cmd) {
- case BLKFLSBUF:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- if (!is_unrecognized_ioctl(ret))
- return ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
- fsync_bdev(bdev);
- invalidate_bdev(bdev);
- return 0;
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!is_unrecognized_ioctl(ret))
+ return ret;
- case BLKROSET:
- ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- if (!is_unrecognized_ioctl(ret))
- return ret;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (get_user(n, (int __user *)(arg)))
- return -EFAULT;
- set_device_ro(bdev, n);
- return 0;
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev);
+ return 0;
+}
- case BLKDISCARD:
- case BLKSECDISCARD: {
- uint64_t range[2];
+static int blkdev_roset(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
+{
+ int ret, n;
- if (!(mode & FMODE_WRITE))
- return -EBADF;
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!is_unrecognized_ioctl(ret))
+ return ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (get_user(n, (int __user *)arg))
+ return -EFAULT;
+ set_device_ro(bdev, n);
+ return 0;
+}
- if (copy_from_user(range, (void __user *)arg, sizeof(range)))
- return -EFAULT;
+static int blkdev_getgeo(struct block_device *bdev,
+ struct hd_geometry __user *argp)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct hd_geometry geo;
+ int ret;
- return blk_ioctl_discard(bdev, range[0], range[1],
- cmd == BLKSECDISCARD);
- }
- case BLKZEROOUT: {
- uint64_t range[2];
+ if (!argp)
+ return -EINVAL;
+ if (!disk->fops->getgeo)
+ return -ENOTTY;
+
+ /*
+ * We need to set the startsect first, the driver may
+ * want to override it.
+ */
+ memset(&geo, 0, sizeof(geo));
+ geo.start = get_start_sect(bdev);
+ ret = disk->fops->getgeo(bdev, &geo);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &geo, sizeof(geo)))
+ return -EFAULT;
+ return 0;
+}
- if (!(mode & FMODE_WRITE))
- return -EBADF;
+/* set the logical block size */
+static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+ int __user *argp)
+{
+ int ret, n;
- if (copy_from_user(range, (void __user *)arg, sizeof(range)))
- return -EFAULT;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+ if (get_user(n, argp))
+ return -EFAULT;
- return blk_ioctl_zeroout(bdev, range[0], range[1]);
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
}
- case HDIO_GETGEO: {
- struct hd_geometry geo;
+ ret = set_blocksize(bdev, n);
+ if (!(mode & FMODE_EXCL))
+ blkdev_put(bdev, mode | FMODE_EXCL);
+ return ret;
+}
- if (!arg)
- return -EINVAL;
- if (!disk->fops->getgeo)
- return -ENOTTY;
-
- /*
- * We need to set the startsect first, the driver may
- * want to override it.
- */
- memset(&geo, 0, sizeof(geo));
- geo.start = get_start_sect(bdev);
- ret = disk->fops->getgeo(bdev, &geo);
- if (ret)
- return ret;
- if (copy_to_user((struct hd_geometry __user *)arg, &geo,
- sizeof(geo)))
- return -EFAULT;
- return 0;
- }
+/*
+ * always keep this in sync with compat_blkdev_ioctl()
+ */
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct backing_dev_info *bdi;
+ void __user *argp = (void __user *)arg;
+ loff_t size;
+ unsigned int max_sectors;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return blkdev_flushbuf(bdev, mode, cmd, arg);
+ case BLKROSET:
+ return blkdev_roset(bdev, mode, cmd, arg);
+ case BLKDISCARD:
+ return blk_ioctl_discard(bdev, mode, arg, 0);
+ case BLKSECDISCARD:
+ return blk_ioctl_discard(bdev, mode, arg,
+ BLKDEV_DISCARD_SECURE);
+ case BLKZEROOUT:
+ return blk_ioctl_zeroout(bdev, mode, arg);
+ case HDIO_GETGEO:
+ return blkdev_getgeo(bdev, argp);
case BLKRAGET:
case BLKFRAGET:
if (!arg)
@@ -414,28 +551,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKBSZSET:
- /* set the logical block size */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!arg)
- return -EINVAL;
- if (get_user(n, (int __user *) arg))
- return -EFAULT;
- if (!(mode & FMODE_EXCL)) {
- bdgrab(bdev);
- if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
- }
- ret = set_blocksize(bdev, n);
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
- return ret;
+ return blkdev_bszset(bdev, mode, argp);
case BLKPG:
- ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
- break;
+ return blkpg_ioctl(bdev, argp);
case BLKRRPART:
- ret = blkdev_reread_part(bdev);
- break;
+ return blkdev_reread_part(bdev);
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
@@ -447,11 +567,21 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKTRACESTOP:
case BLKTRACESETUP:
case BLKTRACETEARDOWN:
- ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
- break;
+ return blk_trace_ioctl(bdev, cmd, argp);
+ case IOC_PR_REGISTER:
+ return blkdev_pr_register(bdev, argp);
+ case IOC_PR_RESERVE:
+ return blkdev_pr_reserve(bdev, argp);
+ case IOC_PR_RELEASE:
+ return blkdev_pr_release(bdev, argp);
+ case IOC_PR_PREEMPT:
+ return blkdev_pr_preempt(bdev, argp, false);
+ case IOC_PR_PREEMPT_ABORT:
+ return blkdev_pr_preempt(bdev, argp, true);
+ case IOC_PR_CLEAR:
+ return blkdev_pr_clear(bdev, argp);
default:
- ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
- return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);
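
The new IOC_PR_* ioctls each copy a small fixed-size struct from userspace, require CAP_SYS_ADMIN, and fail with -EOPNOTSUPP unless the driver supplies pr_ops. A self-contained userspace sketch of registering a reservation key (the device path and key value are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/pr.h>

    int main(void)
    {
            struct pr_registration reg = {
                    .old_key = 0,
                    .new_key = 0xabcd1234,
                    .flags   = 0, /* only PR_FL_IGNORE_KEY is accepted */
            };
            int fd = open("/dev/sda", O_RDWR);

            if (fd < 0 || ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
                    perror("IOC_PR_REGISTER");
            return 0;
    }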
diff --git a/block/partition-generic.c b/block/partition-generic.c
index e7711133284e..3b030157ec85 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -428,6 +428,7 @@ rescan:
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
+ blk_integrity_revalidate(disk);
check_disk_size_change(disk, bdev);
bdev->bd_invalidated = 0;
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 24d6e9715318..2c97912335a9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -160,38 +160,30 @@ static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}
-struct blk_integrity t10_pi_type1_crc = {
+struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
-struct blk_integrity t10_pi_type1_ip = {
+struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
-struct blk_integrity t10_pi_type3_crc = {
+struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
-struct blk_integrity t10_pi_type3_ip = {
+struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
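
With tuple_size and tag_size moved into the per-queue struct blk_integrity, a profile is reduced to a name plus the two iter-based callbacks. A hedged sketch of a custom no-op profile (the names are hypothetical; nop_profile in blk-integrity.c above has the same shape):

    static int my_nop_fn(struct blk_integrity_iter *iter)
    {
            return 0; /* generate/verify nothing */
    }

    static struct blk_integrity_profile my_profile = {
            .name        = "my-nop",
            .generate_fn = my_nop_fn,
            .verify_fn   = my_nop_fn,
    };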