Diffstat (limited to 'drivers/block')
 drivers/block/aoe/aoecmd.c          |   4
 drivers/block/brd.c                 |   7
 drivers/block/cciss.c               |  16
 drivers/block/drbd/drbd_bitmap.c    |  26
 drivers/block/drbd/drbd_debugfs.c   |   4
 drivers/block/drbd/drbd_int.h       |   3
 drivers/block/floppy.c              |   5
 drivers/block/mtip32xx/mtip32xx.c   |  31
 drivers/block/nbd.c                 |   1
 drivers/block/null_blk.c            | 136
 drivers/block/rbd.c                 |   4
 drivers/block/rsxx/core.c           |  11
 drivers/block/xen-blkback/blkback.c |  15
 drivers/block/xen-blkback/common.h  |   8
 drivers/block/zram/zcomp.c          |  24
 drivers/block/zram/zcomp.h          |   2
 drivers/block/zram/zcomp_lz4.c      |  15
 drivers/block/zram/zcomp_lzo.c      |  15
 drivers/block/zram/zram_drv.c       |   7
 19 files changed, 160 insertions(+), 174 deletions(-)
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ad80c85e0857..d048d2009e89 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -964,9 +964,9 @@ aoecmd_sleepwork(struct work_struct *work)
ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);
if (bd) {
- mutex_lock(&bd->bd_inode->i_mutex);
+ inode_lock(bd->bd_inode);
i_size_write(bd->bd_inode, (loff_t)ssize<<9);
- mutex_unlock(&bd->bd_inode->i_mutex);
+ inode_unlock(bd->bd_inode);
bdput(bd);
}
spin_lock_irq(&d->lock);
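(Not part of the diff:) the aoe hunk is a mechanical conversion from open-coding mutex_lock(&inode->i_mutex) to the inode_lock()/inode_unlock() wrappers. A minimal sketch of the resulting pattern for refreshing a block device's cached size, mirroring the bdget_disk()/bdput() usage above (helper name is illustrative):

	/* sketch only: update the bdev inode size under inode_lock() */
	static void sketch_update_bdev_size(struct block_device *bd, sector_t sectors)
	{
		inode_lock(bd->bd_inode);
		i_size_write(bd->bd_inode, (loff_t)sectors << 9);
		inode_unlock(bd->bd_inode);
	}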
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a5880f4ab40e..cb27190e9f39 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -19,6 +19,9 @@
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+#include <linux/pfn_t.h>
+#endif
#include <asm/uaccess.h>
@@ -378,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, unsigned long *pfn)
+ void __pmem **kaddr, pfn_t *pfn)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
@@ -389,7 +392,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
if (!page)
return -ENOSPC;
*kaddr = (void __pmem *)page_address(page);
- *pfn = page_to_pfn(page);
+ *pfn = page_to_pfn_t(page);
return PAGE_SIZE;
}
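(Not part of the diff:) brd's ->direct_access now reports a pfn_t instead of a raw unsigned long pfn, so callers learn whether the frame is backed by a struct page or by device memory. A sketch of a provider under the new signature, assuming a hypothetical page-lookup helper:

	static long sketch_direct_access(struct block_device *bdev, sector_t sector,
					 void __pmem **kaddr, pfn_t *pfn)
	{
		struct page *page = sketch_lookup_page(bdev, sector);	/* hypothetical */

		if (!page)
			return -ENOSPC;
		*kaddr = (void __pmem *)page_address(page);
		*pfn = page_to_pfn_t(page);	/* pfn plus PFN_* flags */
		return PAGE_SIZE;		/* one page mapped per call */
	}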
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 0422c47261c3..63c2064689f8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -514,14 +514,9 @@ cciss_proc_write(struct file *file, const char __user *buf,
if (!buf || length > PAGE_SIZE - 1)
return -EINVAL;
- buffer = (char *)__get_free_page(GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- err = -EFAULT;
- if (copy_from_user(buffer, buf, length))
- goto out;
- buffer[length] = '\0';
+ buffer = memdup_user_nul(buf, length);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
#ifdef CONFIG_CISS_SCSI_TAPE
if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
@@ -537,8 +532,7 @@ cciss_proc_write(struct file *file, const char __user *buf,
/* might be nice to have "disengage" too, but it's not
safely possible. (only 1 module use count, lock issues.) */
-out:
- free_page((unsigned long)buffer);
+ kfree(buffer);
return err;
}
@@ -3854,7 +3848,7 @@ static void print_cfg_table(ctlr_info_t *h)
readl(&(tb->HostWrite.CoalIntDelay)));
dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
- dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n",
+ dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%x\n",
readl(&(tb->CmdsOutMax)));
dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
readl(&(tb->BusTypes)));
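(Not part of the diff:) memdup_user_nul() collapses the allocate / copy_from_user / NUL-terminate sequence into one call and reports failure as an ERR_PTR, which is why the out: label and free_page() above disappear. The general shape the cciss hunk adopts, shown as a hypothetical write handler:

	static ssize_t sketch_proc_write(struct file *file, const char __user *ubuf,
					 size_t len, loff_t *ppos)
	{
		char *kbuf;

		if (len > PAGE_SIZE - 1)
			return -EINVAL;

		kbuf = memdup_user_nul(ubuf, len);	/* kmalloc + copy + '\0' */
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);

		/* ... parse kbuf as a NUL-terminated string ... */

		kfree(kbuf);	/* memdup_user_nul() allocates with kmalloc */
		return len;
	}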
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 0dabc9b93725..92d6fc020a65 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -364,12 +364,9 @@ static void bm_free_pages(struct page **pages, unsigned long number)
}
}
-static void bm_vk_free(void *ptr, int v)
+static inline void bm_vk_free(void *ptr)
{
- if (v)
- vfree(ptr);
- else
- kfree(ptr);
+ kvfree(ptr);
}
/*
@@ -379,7 +376,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
struct page **old_pages = b->bm_pages;
struct page **new_pages, *page;
- unsigned int i, bytes, vmalloced = 0;
+ unsigned int i, bytes;
unsigned long have = b->bm_number_of_pages;
BUG_ON(have == 0 && old_pages != NULL);
@@ -401,7 +398,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
PAGE_KERNEL);
if (!new_pages)
return NULL;
- vmalloced = 1;
}
if (want >= have) {
@@ -411,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
- bm_vk_free(new_pages, vmalloced);
+ bm_vk_free(new_pages);
return NULL;
}
/* we want to know which page it is
@@ -427,11 +423,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
*/
}
- if (vmalloced)
- b->bm_flags |= BM_P_VMALLOCED;
- else
- b->bm_flags &= ~BM_P_VMALLOCED;
-
return new_pages;
}
@@ -469,7 +460,7 @@ void drbd_bm_cleanup(struct drbd_device *device)
if (!expect(device->bitmap))
return;
bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
- bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+ bm_vk_free(device->bitmap->bm_pages);
kfree(device->bitmap);
device->bitmap = NULL;
}
@@ -643,7 +634,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
int err = 0, growing;
- int opages_vmalloced;
if (!expect(b))
return -ENOMEM;
@@ -656,8 +646,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
if (capacity == b->bm_dev_capacity)
goto out;
- opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
-
if (capacity == 0) {
spin_lock_irq(&b->bm_lock);
opages = b->bm_pages;
@@ -671,7 +659,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
b->bm_dev_capacity = 0;
spin_unlock_irq(&b->bm_lock);
bm_free_pages(opages, onpages);
- bm_vk_free(opages, opages_vmalloced);
+ bm_vk_free(opages);
goto out;
}
bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
@@ -744,7 +732,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
spin_unlock_irq(&b->bm_lock);
if (opages != npages)
- bm_vk_free(opages, opages_vmalloced);
+ bm_vk_free(opages);
if (!growing)
b->bm_set = bm_count_bits(b);
drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
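(Not part of the diff:) kvfree() determines from the pointer itself whether it came from kmalloc() or vmalloc(), so drbd no longer has to carry the BM_P_VMALLOCED flag around just to pick the matching free routine. A sketch of the allocate/free pairing bm_realloc_pages() relies on:

	/* sketch: try the cheap allocation first, fall back to vmalloc,
	 * and let kvfree() work out which one it was at free time */
	static void *sketch_alloc_page_array(unsigned long want)
	{
		size_t bytes = want * sizeof(struct page *);
		void *ptr;

		ptr = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
		if (!ptr)
			ptr = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO, PAGE_KERNEL);
		return ptr;	/* release with kvfree(ptr) */
	}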
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 96a0107a72ea..4de95bbff486 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -434,12 +434,12 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
if (!parent || d_really_is_negative(parent))
goto out;
/* serialize with d_delete() */
- mutex_lock(&d_inode(parent)->i_mutex);
+ inode_lock(d_inode(parent));
/* Make sure the object is still alive */
if (simple_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
- mutex_unlock(&d_inode(parent)->i_mutex);
+ inode_unlock(d_inode(parent));
if (!ret) {
ret = single_open(file, show, data);
if (ret)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b6844feb9f9b..34bc84efc29e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -536,9 +536,6 @@ struct drbd_bitmap; /* opaque for drbd_device */
/* definition of bits in bm_flags to be used in drbd_bm_lock
* and drbd_bitmap_io and friends. */
enum bm_flag {
- /* do we need to kfree, or vfree bm_pages? */
- BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
-
/* currently locked for bulk operation */
BM_LOCKED_MASK = 0xf,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 331363e7de0f..9e251201dd48 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3585,7 +3585,7 @@ static void __init config_types(void)
unsigned int type = UDP->cmos;
struct floppy_drive_params *params;
const char *name = NULL;
- static char temparea[32];
+ char temparea[32];
if (type < ARRAY_SIZE(default_drive_params)) {
params = &default_drive_params[type].params;
@@ -3596,7 +3596,8 @@ static void __init config_types(void)
allowed_drive_mask &= ~(1 << drive);
} else {
params = &default_drive_params[0].params;
- sprintf(temparea, "unknown type %d (usb?)", type);
+ snprintf(temparea, sizeof(temparea),
+ "unknown type %d (usb?)", type);
name = temparea;
}
if (name) {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3457ac8c03e2..9b180dbbd03c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -104,9 +104,9 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
-struct list_head online_list;
-struct list_head removing_list;
-spinlock_t dev_lock;
+static struct list_head online_list;
+static struct list_head removing_list;
+static spinlock_t dev_lock;
/*
* Global variable used to hold the major block device number
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
- rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
+ rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
return blk_mq_rq_to_pdu(rq);
}
@@ -2029,13 +2029,10 @@ static int exec_drive_taskfile(struct driver_data *dd,
}
if (taskout) {
- outbuf = kzalloc(taskout, GFP_KERNEL);
- if (outbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
- if (copy_from_user(outbuf, buf + outtotal, taskout)) {
- err = -EFAULT;
+ outbuf = memdup_user(buf + outtotal, taskout);
+ if (IS_ERR(outbuf)) {
+ err = PTR_ERR(outbuf);
+ outbuf = NULL;
goto abort;
}
outbuf_dma = pci_map_single(dd->pdev,
@@ -2050,14 +2047,10 @@ static int exec_drive_taskfile(struct driver_data *dd,
}
if (taskin) {
- inbuf = kzalloc(taskin, GFP_KERNEL);
- if (inbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
-
- if (copy_from_user(inbuf, buf + intotal, taskin)) {
- err = -EFAULT;
+ inbuf = memdup_user(buf + intotal, taskin);
+ if (IS_ERR(inbuf)) {
+ err = PTR_ERR(inbuf);
+ inbuf = NULL;
goto abort;
}
inbuf_dma = pci_map_single(dd->pdev,
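(Not part of the diff:) memdup_user() reports failure via ERR_PTR, so the converted mtip32xx paths translate it with PTR_ERR() and reset the local pointer to NULL before jumping to the shared abort label; the common cleanup can then call kfree() unconditionally. The idiom in isolation, with illustrative names:

	static int sketch_copy_taskfile(const void __user *buf, size_t taskout)
	{
		u8 *outbuf = NULL;
		int err = 0;

		if (taskout) {
			outbuf = memdup_user(buf, taskout);
			if (IS_ERR(outbuf)) {
				err = PTR_ERR(outbuf);
				outbuf = NULL;	/* so the shared cleanup may kfree() it */
				goto abort;
			}
		}

		/* ... hand outbuf to the hardware ... */

	abort:
		kfree(outbuf);	/* kfree(NULL) is a no-op */
		return err;
	}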
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 93b3f99b6865..e4c5cc107934 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -827,6 +827,7 @@ static const struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
.ioctl = nbd_ioctl,
+ .compat_ioctl = nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 5c8ba5484d86..8ba1e97d573c 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -18,6 +18,7 @@ struct nullb_cmd {
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
+ struct hrtimer timer;
};
struct nullb_queue {
@@ -49,17 +50,6 @@ static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
-struct completion_queue {
- struct llist_head list;
- struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
-
enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
@@ -142,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
static int hw_queue_depth = 64;
@@ -180,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
put_tag(cmd->nq, cmd->tag);
}
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
@@ -190,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
+ if (irqmode == NULL_IRQ_TIMER) {
+ hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ cmd->timer.function = null_cmd_timer_expired;
+ }
return cmd;
}
@@ -220,6 +217,11 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
+ struct request_queue *q = NULL;
+
+ if (cmd->rq)
+ q = cmd->rq->q;
+
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@@ -234,51 +236,29 @@ static void end_cmd(struct nullb_cmd *cmd)
}
free_cmd(cmd);
+
+ /* Restart queue if needed, as we are freeing a tag */
+ if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
- struct completion_queue *cq;
- struct llist_node *entry;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- while ((entry = llist_del_all(&cq->list)) != NULL) {
- entry = llist_reverse_order(entry);
- do {
- struct request_queue *q = NULL;
-
- cmd = container_of(entry, struct nullb_cmd, ll_list);
- entry = entry->next;
- if (cmd->rq)
- q = cmd->rq->q;
- end_cmd(cmd);
-
- if (q && !q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock(q->queue_lock);
- }
- } while (entry);
- }
+ end_cmd(container_of(timer, struct nullb_cmd, timer));
return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+ ktime_t kt = ktime_set(0, completion_nsec);
- cmd->ll_list.next = NULL;
- if (llist_add(&cmd->ll_list, &cq->list)) {
- ktime_t kt = ktime_set(0, completion_nsec);
-
- hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
- }
-
- put_cpu();
+ hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
@@ -376,6 +356,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ if (irqmode == NULL_IRQ_TIMER) {
+ hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cmd->timer.function = null_cmd_timer_expired;
+ }
cmd->rq = bd->rq;
cmd->nq = hctx->driver_data;
@@ -452,19 +436,19 @@ static void null_del_dev(struct nullb *nullb)
static void null_lnvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
- struct nvm_dev *dev = rqd->dev;
- dev->mt->end_io(rqd, error);
+ nvm_end_io(rqd, error);
blk_put_request(rq);
}
-static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
+ struct request_queue *q = dev->q;
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+ rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
@@ -485,7 +469,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
return 0;
}
-static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
@@ -510,17 +494,17 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;
- do_div(size, bs); /* convert size to pages */
- do_div(size, 256); /* concert size to pgs pr blk */
+ sector_div(size, bs); /* convert size to pages */
+ size >>= 8; /* convert size to pgs per blk */
grp = &id->groups[0];
grp->mtype = 0;
grp->fmtype = 0;
grp->num_ch = 1;
grp->num_pg = 256;
blksize = size;
- do_div(size, (1 << 16));
+ size >>= 16;
grp->num_lun = size + 1;
- do_div(blksize, grp->num_lun);
+ sector_div(blksize, grp->num_lun);
grp->num_blk = blksize;
grp->num_pln = 1;
@@ -538,7 +522,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
return 0;
}
-static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
mempool_t *virtmem_pool;
@@ -556,7 +540,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
mempool_destroy(pool);
}
-static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
@@ -780,7 +764,9 @@ out:
static int __init null_init(void)
{
+ int ret = 0;
unsigned int i;
+ struct nullb *nullb;
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
@@ -813,19 +799,6 @@ static int __init null_init(void)
mutex_init(&lock);
- /* Initialize a separate list for each CPU for issuing softirqs */
- for_each_possible_cpu(i) {
- struct completion_queue *cq = &per_cpu(completion_queues, i);
-
- init_llist_head(&cq->list);
-
- if (irqmode != NULL_IRQ_TIMER)
- continue;
-
- hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cq->timer.function = null_cmd_timer_expired;
- }
-
null_major = register_blkdev(0, "nullb");
if (null_major < 0)
return null_major;
@@ -835,22 +808,29 @@ static int __init null_init(void)
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_ppa;
}
}
for (i = 0; i < nr_devices; i++) {
- if (null_add_dev()) {
- unregister_blkdev(null_major, "nullb");
- goto err_ppa;
- }
+ ret = null_add_dev();
+ if (ret)
+ goto err_dev;
}
pr_info("null: module loaded\n");
return 0;
-err_ppa:
+
+err_dev:
+ while (!list_empty(&nullb_list)) {
+ nullb = list_entry(nullb_list.next, struct nullb, list);
+ null_del_dev(nullb);
+ }
kmem_cache_destroy(ppa_cache);
- return -EINVAL;
+err_ppa:
+ unregister_blkdev(null_major, "nullb");
+ return ret;
}
static void __exit null_exit(void)
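(Not part of the diff:) the null_blk rework drops the per-CPU completion_queues/llist machinery and embeds an hrtimer in each nullb_cmd: the timer is armed when a command is issued and its callback completes exactly that command, replacing the shared per-CPU timer that had to drain an llist of completions. The per-command timer pattern, stripped down (struct and helper names are illustrative):

	struct sketch_cmd {
		struct hrtimer timer;
		/* ... per-command state ... */
	};

	static enum hrtimer_restart sketch_timer_fn(struct hrtimer *timer)
	{
		struct sketch_cmd *cmd = container_of(timer, struct sketch_cmd, timer);

		sketch_complete_cmd(cmd);	/* hypothetical completion helper */
		return HRTIMER_NORESTART;	/* one-shot: the command is done */
	}

	static void sketch_issue_cmd(struct sketch_cmd *cmd, unsigned long delay_ns)
	{
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = sketch_timer_fn;
		hrtimer_start(&cmd->timer, ktime_set(0, delay_ns), HRTIMER_MODE_REL);
	}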
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 235708c7c46e..4a876785b68c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
img_request->rq = rq;
+ snapc = NULL; /* img_request consumes a ref */
if (op_type == OBJ_OP_DISCARD)
result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
@@ -5184,8 +5185,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
out_err:
rbd_dev_unparent(rbd_dev);
- if (parent)
- rbd_dev_destroy(parent);
+ rbd_dev_destroy(parent);
return ret;
}
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index d8b2488aaade..34997df132e2 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -203,14 +203,11 @@ static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
char *buf;
ssize_t st;
- buf = kzalloc(cnt, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ buf = memdup_user(ubuf, cnt);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
- st = copy_from_user(buf, ubuf, cnt);
- if (!st)
- st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt,
- buf, 1);
+ st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
kfree(buf);
if (st)
return st;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 148930c8c121..4809c1501d7e 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -968,6 +968,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
+ uint8_t first_sect, last_sect;
+
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
@@ -975,15 +977,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
+
pending_req->segments[n]->gref = segments[i].gref;
- seg[n].nsec = segments[i].last_sect -
- segments[i].first_sect + 1;
- seg[n].offset = (segments[i].first_sect << 9);
- if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
- (segments[i].last_sect < segments[i].first_sect)) {
+
+ first_sect = READ_ONCE(segments[i].first_sect);
+ last_sect = READ_ONCE(segments[i].last_sect);
+ if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}
+
+ seg[n].nsec = last_sect - first_sect + 1;
+ seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index b27c5ba15600..dea61f6ab8cb 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -420,8 +420,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
@@ -468,8 +468,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
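(Not part of the diff:) both xen-blkback changes are double-fetch hardening: the request descriptors live in a shared ring page that the frontend can keep modifying, so each field is read out of shared memory exactly once with READ_ONCE(), validated, and only the local copy is used afterwards, rather than being re-read (and possibly changed by the guest) between the check and the use. The shape of the idiom, as a hypothetical helper:

	static int sketch_parse_segment(struct blkif_request_segment *seg,
					unsigned int *nsec, unsigned int *offset)
	{
		uint8_t first_sect, last_sect;

		/* snapshot the guest-writable fields exactly once */
		first_sect = READ_ONCE(seg->first_sect);
		last_sect  = READ_ONCE(seg->last_sect);

		/* validate the snapshot, not the live shared page */
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect)
			return -EINVAL;

		/* from here on, use only the local copies */
		*nsec   = last_sect - first_sect + 1;
		*offset = first_sect << 9;
		return 0;
	}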
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5cb13ca3a3ac..3ef42e563bb5 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -74,18 +74,18 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
* allocate new zcomp_strm structure with ->private initialized by
* backend, return NULL on error
*/
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), flags);
if (!zstrm)
return NULL;
- zstrm->private = comp->backend->create();
+ zstrm->private = comp->backend->create(flags);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1);
if (!zstrm->private || !zstrm->buffer) {
zcomp_strm_free(comp, zstrm);
zstrm = NULL;
@@ -120,8 +120,16 @@ static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
/* allocate new zstrm stream */
zs->avail_strm++;
spin_unlock(&zs->strm_lock);
-
- zstrm = zcomp_strm_alloc(comp);
+ /*
+ * This function can be called in swapout/fs write path
+ * so we can't use GFP_FS|IO. And it assumes we already
+ * have at least one stream in zram initialization so we
+ * don't do best effort to allocate more stream in here.
+ * A default stream will work well without further multiple
+ * streams. That's why we use NORETRY | NOWARN.
+ */
+ zstrm = zcomp_strm_alloc(comp, GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOWARN);
if (!zstrm) {
spin_lock(&zs->strm_lock);
zs->avail_strm--;
@@ -209,7 +217,7 @@ static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
zs->max_strm = max_strm;
zs->avail_strm = 1;
- zstrm = zcomp_strm_alloc(comp);
+ zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
if (!zstrm) {
kfree(zs);
return -ENOMEM;
@@ -259,7 +267,7 @@ static int zcomp_strm_single_create(struct zcomp *comp)
comp->stream = zs;
mutex_init(&zs->strm_lock);
- zs->zstrm = zcomp_strm_alloc(comp);
+ zs->zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
if (!zs->zstrm) {
kfree(zs);
return -ENOMEM;
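(Not part of the diff:) zcomp_strm_alloc() can now be called from the swap-out / filesystem writeback path, where an allocation that waits on fs or block I/O could deadlock. Threading a gfp_t through lets the initial stream keep GFP_KERNEL while on-demand streams use GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN and simply fall back to the always-present default stream when the allocation fails. The two call sites, sketched as helpers:

	/* sketch only, assuming the zcomp_strm_alloc() signature above */
	static struct zcomp_strm *sketch_alloc_initial_stream(struct zcomp *comp)
	{
		/* device initialization: sleeping and doing I/O is fine */
		return zcomp_strm_alloc(comp, GFP_KERNEL);
	}

	static struct zcomp_strm *sketch_alloc_ondemand_stream(struct zcomp *comp)
	{
		/*
		 * reclaim/writeback context: must not recurse into fs or block
		 * I/O, and failure is tolerable because a default stream
		 * already exists, so don't retry hard and don't warn
		 */
		return zcomp_strm_alloc(comp, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	}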
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 46e2b9f8f1f0..b7d2a4bcae54 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -33,7 +33,7 @@ struct zcomp_backend {
int (*decompress)(const unsigned char *src, size_t src_len,
unsigned char *dst);
- void *(*create)(void);
+ void *(*create)(gfp_t flags);
void (*destroy)(void *private);
const char *name;
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
index f2afb7e988c3..0110086accba 100644
--- a/drivers/block/zram/zcomp_lz4.c
+++ b/drivers/block/zram/zcomp_lz4.c
@@ -10,17 +10,26 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lz4.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lz4.h"
-static void *zcomp_lz4_create(void)
+static void *zcomp_lz4_create(gfp_t flags)
{
- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ ret = kmalloc(LZ4_MEM_COMPRESS, flags);
+ if (!ret)
+ ret = __vmalloc(LZ4_MEM_COMPRESS,
+ flags | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void zcomp_lz4_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
index da1bc47d588e..ed7a1f0549ec 100644
--- a/drivers/block/zram/zcomp_lzo.c
+++ b/drivers/block/zram/zcomp_lzo.c
@@ -10,17 +10,26 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lzo.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lzo.h"
-static void *lzo_create(void)
+static void *lzo_create(gfp_t flags)
{
- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ ret = kmalloc(LZO1X_MEM_COMPRESS, flags);
+ if (!ret)
+ ret = __vmalloc(LZO1X_MEM_COMPRESS,
+ flags | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void lzo_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int lzo_compress(const unsigned char *src, unsigned char *dst,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 47915d736f8d..370c2f76016d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1325,7 +1325,6 @@ static int zram_remove(struct zram *zram)
pr_info("Removed device: %s\n", zram->disk->disk_name);
- idr_remove(&zram_index_idr, zram->disk->first_minor);
blk_cleanup_queue(zram->disk->queue);
del_gendisk(zram->disk);
put_disk(zram->disk);
@@ -1367,10 +1366,12 @@ static ssize_t hot_remove_store(struct class *class,
mutex_lock(&zram_index_mutex);
zram = idr_find(&zram_index_idr, dev_id);
- if (zram)
+ if (zram) {
ret = zram_remove(zram);
- else
+ idr_remove(&zram_index_idr, dev_id);
+ } else {
ret = -ENODEV;
+ }
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;