-rw-r--r--  block/blk-integrity.c     23
-rw-r--r--  block/blk-mq.c             3
-rw-r--r--  block/blk-rq-qos.h        25
-rw-r--r--  drivers/block/loop.c      45
-rw-r--r--  drivers/block/ublk_drv.c  37
5 files changed, 83 insertions, 50 deletions
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 9b27963680dc..964eebbee14d 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
+ struct bio_integrity_payload *bip, *bip_next;
+
if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
return true;
if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
return false;
- if (bio_integrity(req->bio)->bip_flags !=
- bio_integrity(next->bio)->bip_flags)
+ bip = bio_integrity(req->bio);
+ bip_next = bio_integrity(next->bio);
+ if (bip->bip_flags != bip_next->bip_flags)
+ return false;
+
+ if (bip->bip_flags & BIP_CHECK_APPTAG &&
+ bip->app_tag != bip_next->app_tag)
return false;
if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
struct bio *bio)
{
+ struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
int nr_integrity_segs;
- if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
+ if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
return true;
- if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
+ if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
+ return false;
+
+ bip = bio_integrity(req->bio);
+ if (bip->bip_flags != bip_bio->bip_flags)
return false;
- if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
+ if (bip->bip_flags & BIP_CHECK_APPTAG &&
+ bip->app_tag != bip_bio->app_tag)
return false;
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
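
A note on the blk-integrity hunks above: two requests whose integrity payloads carry different application tags must not be merged, since the merged request could only apply one app_tag to all of its protection information. The merge predicates therefore now compare app_tag whenever BIP_CHECK_APPTAG is set, in addition to requiring identical bip_flags. A minimal standalone sketch of that predicate (the struct and flag value below are simplified stand-ins, not the kernel definitions):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIP_CHECK_APPTAG 0x08          /* stand-in flag bit, not the kernel value */

	struct bip_lite {                      /* simplified bio_integrity_payload */
		uint16_t bip_flags;
		uint16_t app_tag;
	};

	/* Same check as the patched blk_integrity_merge_rq()/_bio(): flags must
	 * match exactly, and if app tags are being verified, the tags must too. */
	static bool integrity_mergeable(const struct bip_lite *a, const struct bip_lite *b)
	{
		if (a->bip_flags != b->bip_flags)
			return false;
		if ((a->bip_flags & BIP_CHECK_APPTAG) && a->app_tag != b->app_tag)
			return false;
		return true;
	}

	int main(void)
	{
		struct bip_lite a = { BIP_CHECK_APPTAG, 0x1234 };
		struct bip_lite b = { BIP_CHECK_APPTAG, 0x5678 };

		printf("%d\n", integrity_mergeable(&a, &b));  /* 0: tags differ */
		b.app_tag = a.app_tag;
		printf("%d\n", integrity_mergeable(&a, &b));  /* 1: mergeable */
		return 0;
	}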
diff --git a/block/blk-mq.c b/block/blk-mq.c
index eff4f72ce83b..a29d8ac9d3e3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
* Make sure reading the old queue_hw_ctx from other
* context concurrently won't trigger uaf.
*/
- synchronize_rcu_expedited();
- kfree(hctxs);
+ kfree_rcu_mightsleep(hctxs);
hctxs = new_hctxs;
}
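
The blk-mq hunk replaces a synchronize_rcu_expedited() + kfree() pair with kfree_rcu_mightsleep(): the old queue_hw_ctx array is still freed only after a grace period (so concurrent RCU readers cannot hit a use-after-free), but the caller no longer forces an expedited grace period to complete synchronously. kfree_rcu_mightsleep() takes a bare pointer, needs no rcu_head in the object, and may itself sleep. A hedged sketch of the general pattern, with illustrative names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Illustrative: swap in a new array for RCU readers, then defer
	 * freeing the old one past a grace period. */
	static void publish_and_free(void __rcu **slot, void *new_arr, void *old_arr)
	{
		rcu_assign_pointer(*slot, new_arr);
		/*
		 * No rcu_head is embedded in the object, so this variant may
		 * sleep waiting for a grace period -- callers must be in
		 * sleepable context, as __blk_mq_realloc_hw_ctxs() is.
		 */
		kfree_rcu_mightsleep(old_arr);
	}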
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index b538f2c0febc..a747a504fe42 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_cleanup(q->rq_qos, bio);
}
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos && !blk_rq_is_passthrough(rq))
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+ q->rq_qos && !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_issue(q->rq_qos, rq);
}
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_requeue(q->rq_qos, rq);
}
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos) {
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_THROTTLED);
__rq_qos_throttle(q->rq_qos, bio);
}
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_track(q->rq_qos, rq, bio);
}
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos) {
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_MERGED);
__rq_qos_merge(q->rq_qos, rq, bio);
}
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_queue_depth_changed(q->rq_qos);
}
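
All of the blk-rq-qos.h hunks make the same change: the unlikely() hint is dropped from the QUEUE_FLAG_QOS_ENABLED test. The hint moves the rq_qos path into the cold branch, which is exactly backwards on systems running wbt, iocost or iolatency, where the flag is set for every request; with no good static prior either way, an unhinted branch is the neutral choice. For reference, the hints are thin wrappers over __builtin_expect (the standard definitions from include/linux/compiler.h, modulo the branch-profiling variants):

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)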
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 32a3a5b13802..bd59c0e9508b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1225,16 +1225,28 @@ static int loop_clr_fd(struct loop_device *lo)
}
static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, const struct loop_info64 *info)
{
int err;
bool partscan = false;
bool size_changed = false;
unsigned int memflags;
+ /*
+ * If we don't hold exclusive handle for the device, upgrade to it
+ * here to avoid changing device under exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
+ if (err)
+ goto out_reread_partitions;
+ }
+
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
- return err;
+ goto out_abort_claiming;
+
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1273,6 +1285,10 @@ out_unfreeze:
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
+out_abort_claiming:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_status);
+out_reread_partitions:
if (partscan)
loop_reread_partitions(lo);
@@ -1352,7 +1368,9 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
}
static int
-loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;
@@ -1360,17 +1378,19 @@ loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
return -EFAULT;
loop_info64_from_old(&info, &info64);
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
-loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct loop_info64 __user *arg)
{
struct loop_info64 info64;
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
return -EFAULT;
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_status_old(lo, argp);
+ err = loop_set_status_old(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_status64(lo, argp);
+ err = loop_set_status64(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
@@ -1650,8 +1670,9 @@ loop_info64_to_compat(const struct loop_info64 *info64,
}
static int
-loop_set_status_compat(struct loop_device *lo,
- const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int ret;
@@ -1659,7 +1680,7 @@ loop_set_status_compat(struct loop_device *lo,
ret = loop_info64_from_compat(arg, &info64);
if (ret < 0)
return ret;
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- err = loop_set_status_compat(lo,
+ err = loop_set_status_compat(lo, mode, bdev,
(const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
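
The loop.c changes all feed one fix: loop_set_status() now receives the open mode and the block_device so that a caller holding only a non-exclusive handle temporarily claims the device for the duration of the update, preventing it from changing the configuration underneath an exclusive owner. The bd_prepare_to_claim()/bd_abort_claiming() bracket is the usual temporary-claim idiom; a condensed, illustrative sketch of the control flow (locking and the actual status update elided):

	/* Condensed from the patched loop_set_status(); illustrative only. */
	static int set_status_claimed(struct loop_device *lo, blk_mode_t mode,
				      struct block_device *bdev)
	{
		int err = 0;

		/* Upgrade to a temporary exclusive claim unless we already
		 * opened the device exclusively. */
		if (!(mode & BLK_OPEN_EXCL)) {
			err = bd_prepare_to_claim(bdev, set_status_claimed, NULL);
			if (err)
				return err;
		}

		/* ... update the device under lo->lo_mutex ... */

		if (!(mode & BLK_OPEN_EXCL))
			bd_abort_claiming(bdev, set_status_claimed);
		return err;
	}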
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 837fedb02e0d..f6e5a0766721 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -255,20 +255,6 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static void ublk_partition_scan_work(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, partition_scan_work);
-
- if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
- &ub->ub_disk->state)))
- return;
-
- mutex_lock(&ub->ub_disk->open_mutex);
- bdev_disk_changed(ub->ub_disk, false);
- mutex_unlock(&ub->ub_disk->open_mutex);
-}
-
static inline struct ublksrv_io_desc *
ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
@@ -1597,6 +1583,27 @@ static void ublk_put_disk(struct gendisk *disk)
put_device(disk_to_dev(disk));
}
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, partition_scan_work);
+ /* Hold disk reference to prevent UAF during concurrent teardown */
+ struct gendisk *disk = ublk_get_disk(ub);
+
+ if (!disk)
+ return;
+
+ if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+ &disk->state)))
+ goto out;
+
+ mutex_lock(&disk->open_mutex);
+ bdev_disk_changed(disk, false);
+ mutex_unlock(&disk->open_mutex);
+out:
+ ublk_put_disk(disk);
+}
+
/*
* Use this function to ensure that ->canceling is consistently set for
* the device and all queues. Do not set these flags directly.
@@ -2041,7 +2048,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
- flush_work(&ub->partition_scan_work);
+ cancel_work_sync(&ub->partition_scan_work);
ublk_cancel_dev(ub);
}