ublk: scan partition in async way
Implement async partition scan to avoid IO hang when reading partition
tables. Similar to nvme_partition_scan_work(), partition scanning is
deferred to a work queue to prevent deadlocks.
When the partition scan runs synchronously during add_disk(), an IO error
can cause the scan to wait while ub->mutex is held, which can deadlock
with other operations that need the mutex.
Changes:
- Add partition_scan_work to ublk_device structure
- Implement ublk_partition_scan_work() to perform async scan
- Always suppress sync partition scan during add_disk()
- Schedule async work after add_disk() for trusted daemons
- Add flush_work() in ublk_stop_dev() after releasing ub->mutex (see the
  sketch after this list)
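
The resulting work item lifecycle, in isolation, looks roughly like the
sketch below. This is a minimal, hypothetical illustration, not the ublk
code itself: my_dev, my_scan_work, my_start and my_stop are made-up names
standing in for struct ublk_device and the ublk start/stop paths, and
error handling is omitted.

/*
 * Minimal sketch of the deferred partition scan lifecycle.
 * my_dev/my_scan_work/my_start/my_stop are hypothetical stand-ins,
 * not the actual ublk code.
 */
#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct my_dev {
        struct gendisk *disk;
        struct mutex lock;              /* plays the role of ub->mutex */
        struct work_struct scan_work;   /* deferred partition scan */
};

static void my_scan_work(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, scan_work);

        /* Re-enable scanning, then rescan without holding dev->lock. */
        if (!test_and_clear_bit(GD_SUPPRESS_PART_SCAN, &dev->disk->state))
                return;

        mutex_lock(&dev->disk->open_mutex);
        bdev_disk_changed(dev->disk, false);
        mutex_unlock(&dev->disk->open_mutex);
}

static void my_start(struct my_dev *dev)
{
        INIT_WORK(&dev->scan_work, my_scan_work);

        /* Keep add_disk() from scanning synchronously under dev->lock. */
        set_bit(GD_SUPPRESS_PART_SCAN, &dev->disk->state);
        /* ... add_disk(dev->disk) happens here, under dev->lock ... */

        /* Defer the scan so it never waits while dev->lock is held. */
        schedule_work(&dev->scan_work);
}

static void my_stop(struct my_dev *dev)
{
        mutex_lock(&dev->lock);
        /* ... tear down the disk and abort in-flight IO ... */
        mutex_unlock(&dev->lock);

        /* Wait for an in-flight scan only after dropping dev->lock. */
        flush_work(&dev->scan_work);
}

As in the patch itself, the unprivileged-daemon case simply never
schedules the work, so GD_SUPPRESS_PART_SCAN stays set and partitions are
never probed.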
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Reported-by: Yoav Cohen <yoav@nvidia.com>
Closes: https://lore.kernel.org/linux-block/DM4PR12MB63280C5637917C071C2F0D65A9A8A@DM4PR12MB6328.namprd12.prod.outlook.com/
Fixes: 71f28f3136 ("ublk_drv: add io_uring based userspace block driver")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
@@ -237,6 +237,7 @@ struct ublk_device {
         bool canceling;
         pid_t ublksrv_tgid;
         struct delayed_work exit_work;
+        struct work_struct partition_scan_work;
 
         struct ublk_queue *queues[];
 };
@@ -254,6 +255,20 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
                 u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
 static inline unsigned int ublk_req_build_flags(struct request *req);
 
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+        struct ublk_device *ub =
+                container_of(work, struct ublk_device, partition_scan_work);
+
+        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+                                             &ub->ub_disk->state)))
+                return;
+
+        mutex_lock(&ub->ub_disk->open_mutex);
+        bdev_disk_changed(ub->ub_disk, false);
+        mutex_unlock(&ub->ub_disk->open_mutex);
+}
+
 static inline struct ublksrv_io_desc *
 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
 {
@@ -2026,6 +2041,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
         mutex_lock(&ub->mutex);
         ublk_stop_dev_unlocked(ub);
         mutex_unlock(&ub->mutex);
+        flush_work(&ub->partition_scan_work);
         ublk_cancel_dev(ub);
 }
 
@@ -2954,9 +2970,17 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
 
         ublk_apply_params(ub);
 
-        /* don't probe partitions if any daemon task is un-trusted */
-        if (ub->unprivileged_daemons)
-                set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+        /*
+         * Suppress partition scan to avoid potential IO hang.
+         *
+         * If ublk server error occurs during partition scan, the IO may
+         * wait while holding ub->mutex, which can deadlock with other
+         * operations that need the mutex. Defer partition scan to async
+         * work.
+         * For unprivileged daemons, keep GD_SUPPRESS_PART_SCAN set
+         * permanently.
+         */
+        set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
 
         ublk_get_device(ub);
         ub->dev_info.state = UBLK_S_DEV_LIVE;
@@ -2973,6 +2997,10 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
 
         set_bit(UB_STATE_USED, &ub->state);
 
+        /* Schedule async partition scan for trusted daemons */
+        if (!ub->unprivileged_daemons)
+                schedule_work(&ub->partition_scan_work);
+
 out_put_cdev:
         if (ret) {
                 ublk_detach_disk(ub);
@@ -3138,6 +3166,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
         mutex_init(&ub->mutex);
         spin_lock_init(&ub->lock);
         mutex_init(&ub->cancel_mutex);
+        INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);
 
         ret = ublk_alloc_dev_number(ub, header->dev_id);
         if (ret < 0)