struct scrub_bio *wr_curr_bio;
struct mutex wr_lock;
int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
- atomic_t flush_all_writes;
struct btrfs_device *wr_tgtdev;
+ bool flush_all_writes;
/*
* statistics
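flush_all_writes was an atomic_t used purely as a flag: it is set and cleared only from the scrub control thread and merely read by the bio end-io workers, and no site relies on atomic read-modify-write semantics, so a plain bool load/store appears to be enough. Moving the member below wr_tgtdev also packs the struct slightly better.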
WARN_ON(!fs_info->dev_replace.tgtdev);
sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
}
return sctx;
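On the setup side the flag simply starts out false; the context is freshly allocated and not yet visible to any worker at this point, so there is no ordering concern at initialization.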
if (ret)
return ret;
- wait_for_completion(&done.event);
+ wait_for_completion_io(&done.event);
if (done.status)
return -EIO;
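wait_for_completion_io() behaves like wait_for_completion() but accounts the sleeping task as waiting on I/O (it goes through io_schedule() rather than schedule()), which is the accurate accounting here since the caller is blocked on a RAID56 recovery bio.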
struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
int ret;
- ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
+ ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
return !ret;
}
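BTRFS_UUID_SIZE and BTRFS_FSID_SIZE are both 16 bytes today, so this is a semantic fix rather than a behavioral one: fs_devices->fsid is a filesystem id, and using the matching constant keeps the comparison correct should the two sizes ever diverge. memcmp() returns 0 on a match, hence the return !ret.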
scrub_block_put(sblock);
- if (sctx->is_dev_replace &&
- atomic_read(&sctx->flush_all_writes)) {
+ if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
sctx->first_free = sbio->index;
spin_unlock(&sctx->list_lock);
- if (sctx->is_dev_replace &&
- atomic_read(&sctx->flush_all_writes)) {
+ if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
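Both end-io paths above only read the flag to decide whether to push queued write bios right away; the submission itself is still serialized by wr_lock, so the atomic_read() was never protecting anything.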
u64 start, u64 len)
{
u64 offset;
- int nsectors;
+ u64 nsectors64;
+ u32 nsectors;
int sectorsize = sparity->sctx->fs_info->sectorsize;
if (len >= sparity->stripe_len) {
start -= sparity->logic_start;
start = div64_u64_rem(start, sparity->stripe_len, &offset);
offset = div_u64(offset, sectorsize);
- nsectors = (int)len / sectorsize;
+ nsectors64 = div_u64(len, sectorsize);
+
+ ASSERT(nsectors64 < UINT_MAX);
+ nsectors = (u32)nsectors64;
if (offset + nsectors <= sparity->nsectors) {
bitmap_set(bitmap, offset, nsectors);
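The old expression nsectors = (int)len / sectorsize casts the u64 length to int before dividing, so a parity range of 2 GiB or more truncates and can even go negative. A minimal userspace sketch of the difference (not kernel code, values made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t len = 6ULL << 30;	/* hypothetical 6 GiB range */
		int sectorsize = 4096;

		/* old: the cast truncates len to 32 bits before the divide */
		int old_nsectors = (int)len / sectorsize;
		/* new: full 64-bit divide, then an explicitly checked narrowing */
		uint64_t nsectors64 = len / sectorsize;

		/* prints: old=-524288 new=1572864 */
		printf("old=%d new=%llu\n", old_nsectors,
		       (unsigned long long)nsectors64);
		return 0;
	}

In the kernel the divide goes through div_u64() because a bare 64-bit / does not link on 32-bit targets, and the ASSERT records that the narrowed value must still fit the 32-bit range that bitmap_set() works with.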
if (!sum)
return 0;
- index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;
+ index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
+ ASSERT(index < UINT_MAX);
+
num_sectors = sum->len / sctx->fs_info->sectorsize;
memcpy(csum, sum->sums + index, sctx->csum_size);
if (index == num_sectors - 1) {
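Same idea as the bitmap fix above: the old (u32) cast presumably existed to keep the division 32-bit on 32-bit builds, but it silently truncates logical - sum->bytenr. div_u64() does a proper 64-bit by 32-bit division on every architecture, and the ASSERT turns the old implicit assumption, that the index fits in 32 bits, into an explicit one.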
*/
if (atomic_read(&fs_info->scrub_pause_req)) {
/* push queued extents */
- atomic_set(&sctx->flush_all_writes, 1);
+ sctx->flush_all_writes = true;
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
scrub_blocked_if_needed(fs_info);
}
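The pause path is a writer of the flag: set it, flush the read and write bios already queued, wait for bios_in_flight to drain to zero, then clear it again. The set and the clear happen on the same thread, with the workers only reading in between, which is what makes the plain bool safe.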
ro_set = 0;
} else {
btrfs_warn(fs_info,
- "failed setting block group ro, ret=%d\n",
- ret);
+ "failed setting block group ro: %d", ret);
btrfs_put_block_group(cache);
break;
}
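btrfs_warn(), like the other btrfs message helpers, appends the newline itself, so the trailing \n in the old format string produced an extra blank line in the log; dropping it and folding the arguments onto one line matches the usual style of these calls.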
* write requests are really completed when bios_in_flight
* changes to 0.
*/
- atomic_set(&sctx->flush_all_writes, 1);
+ sctx->flush_all_writes = true;
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
*/
wait_event(sctx->list_wait,
atomic_read(&sctx->workers_pending) == 0);
- atomic_set(&sctx->flush_all_writes, 0);
+ sctx->flush_all_writes = false;
scrub_pause_off(fs_info);
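The dev-replace completion path repeats the same flush-and-drain pattern; the final wait shown here is on workers_pending, the queued worker items that may still generate writes, and only after that drains is the flag cleared.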
int max_active = fs_info->thread_pool_size;
if (fs_info->scrub_workers_refcnt == 0) {
- if (is_dev_replace)
- fs_info->scrub_workers =
- btrfs_alloc_workqueue(fs_info, "scrub", flags,
- 1, 4);
- else
- fs_info->scrub_workers =
- btrfs_alloc_workqueue(fs_info, "scrub", flags,
- max_active, 4);
+ fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
+ flags, is_dev_replace ? 1 : max_active, 4);
if (!fs_info->scrub_workers)
goto fail_scrub_workers;
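Pure refactor: the two btrfs_alloc_workqueue() calls differed only in the max_active argument (1 for dev-replace, thread_pool_size otherwise), so a single call with a ternary preserves the behavior; the queue name, flags, and the thresh value of 4 are unchanged.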