Diffstat (limited to 'drivers')
348 files changed, 3962 insertions, 2465 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index c67f6f5ad611..36b0e61f9c09 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -30,6 +30,10 @@ #include <linux/types.h> #include <linux/dmi.h> #include <linux/delay.h> +#ifdef CONFIG_ACPI_PROCFS_POWER +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#endif #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/acpi.h> @@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); + static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device); static void acpi_ac_notify(struct acpi_device *device, u32 event); @@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_ac_dir(void); +extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); +static int acpi_ac_open_fs(struct inode *inode, struct file *file); +#endif + + static int ac_sleep_before_get_state_ms; static struct acpi_driver acpi_ac_driver = { @@ -91,6 +103,16 @@ struct acpi_ac { #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) +#ifdef CONFIG_ACPI_PROCFS_POWER +static const struct file_operations acpi_ac_fops = { + .owner = THIS_MODULE, + .open = acpi_ac_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + /* -------------------------------------------------------------------------- AC Adapter Management -------------------------------------------------------------------------- */ @@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_ac_dir; + +static int acpi_ac_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_ac *ac = seq->private; + + + if (!ac) + return 0; + + if (acpi_ac_get_state(ac)) { + seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); + return 0; + } + + seq_puts(seq, "state: "); + switch (ac->state) { + case ACPI_AC_STATUS_OFFLINE: + seq_puts(seq, "off-line\n"); + break; + case ACPI_AC_STATUS_ONLINE: + seq_puts(seq, "on-line\n"); + break; + default: + seq_puts(seq, "unknown\n"); + break; + } + + return 0; +} + +static int acpi_ac_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); +} + +static int acpi_ac_add_fs(struct acpi_ac *ac) +{ + struct proc_dir_entry *entry = NULL; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(ac->device)) { + acpi_device_dir(ac->device) = + proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); + if (!acpi_device_dir(ac->device)) + return -ENODEV; + } + + /* 'state' [R] */ + entry = proc_create_data(ACPI_AC_FILE_STATE, + S_IRUGO, acpi_device_dir(ac->device), + &acpi_ac_fops, ac); + if (!entry) + return -ENODEV; + return 0; +} + +static int acpi_ac_remove_fs(struct acpi_ac *ac) +{ + + if (acpi_device_dir(ac->device)) { + remove_proc_entry(ACPI_AC_FILE_STATE, + acpi_device_dir(ac->device)); + remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); + acpi_device_dir(ac->device) = NULL; + } + + return 
0; +} +#endif + /* -------------------------------------------------------------------------- Driver Model -------------------------------------------------------------------------- */ @@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device) goto end; ac->charger.name = acpi_device_bid(device); +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_ac_add_fs(ac); + if (result) + goto end; +#endif ac->charger.type = POWER_SUPPLY_TYPE_MAINS; ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); @@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device) ac->battery_nb.notifier_call = acpi_ac_battery_notify; register_acpi_notifier(&ac->battery_nb); end: - if (result) + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif kfree(ac); + } dmi_check_system(ac_dmi_table); return result; @@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device) power_supply_unregister(&ac->charger); unregister_acpi_notifier(&ac->battery_nb); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif + kfree(ac); return 0; @@ -315,9 +427,20 @@ static int __init acpi_ac_init(void) if (acpi_disabled) return -ENODEV; +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_dir = acpi_lock_ac_dir(); + if (!acpi_ac_dir) + return -ENODEV; +#endif + + result = acpi_bus_register_driver(&acpi_ac_driver); - if (result < 0) + if (result < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif return -ENODEV; + } return 0; } @@ -325,6 +448,9 @@ static int __init acpi_ac_init(void) static void __exit acpi_ac_exit(void) { acpi_bus_unregister_driver(&acpi_ac_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif } module_init(acpi_ac_init); module_exit(acpi_ac_exit); diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 6703c1fd993a..4ddb0dca56f6 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -14,6 +14,8 @@ #include <linux/module.h> static const struct acpi_device_id acpi_pnp_device_ids[] = { + /* soc_button_array */ + {"PNP0C40"}, /* pata_isapnp */ {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ /* floppy */ diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 0d7116f34b95..130f513e08c9 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -35,6 +35,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <linux/delay.h> #include <asm/unaligned.h> #ifdef CONFIG_ACPI_PROCFS_POWER @@ -534,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery) " invalid.\n"); } + /* + * When fully charged, some batteries wrongly report + * capacity_now = design_capacity instead of = full_charge_capacity + */ + if (battery->capacity_now > battery->full_charge_capacity + && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) { + battery->capacity_now = battery->full_charge_capacity; + if (battery->capacity_now != battery->design_capacity) + printk_once(KERN_WARNING FW_BUG + "battery: reported current charge level (%d) " + "is higher than reported maximum charge level (%d).\n", + battery->capacity_now, battery->full_charge_capacity); + } + if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * @@ -1151,6 +1166,28 @@ static struct dmi_system_id bat_dmi_table[] = { {}, }; +/* + * Some machines'(E,G Lenovo Z480) ECs are not stable + * 
during boot up and this causes battery driver fails to be + * probed due to failure of getting battery information + * from EC sometimes. After several retries, the operation + * may work. So add retry code here and 20ms sleep between + * every retries. + */ +static int acpi_battery_update_retry(struct acpi_battery *battery) +{ + int retry, ret; + + for (retry = 5; retry; retry--) { + ret = acpi_battery_update(battery, false); + if (!ret) + break; + + msleep(20); + } + return ret; +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -1169,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device) mutex_init(&battery->sysfs_lock); if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); - result = acpi_battery_update(battery, false); + + result = acpi_battery_update_retry(battery); if (result) goto fail; + #ifdef CONFIG_ACPI_PROCFS_POWER result = acpi_battery_add_fs(device); #endif diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ad11ba4a412d..a66ab658abbc 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1,11 +1,14 @@ /* - * ec.c - ACPI Embedded Controller Driver (v2.1) + * ec.c - ACPI Embedded Controller Driver (v2.2) * - * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de> - * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> - * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2001-2014 Intel Corporation + * Author: 2014 Lv Zheng <lv.zheng@intel.com> + * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> + * 2006 Denis Sadykov <denis.m.sadykov@intel.com> + * 2004 Luming Yu <luming.yu@intel.com> + * 2001, 2002 Andy Grover <andrew.grover@intel.com> + * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -52,6 +55,7 @@ /* EC status register */ #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ +#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */ #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ @@ -78,6 +82,9 @@ enum { EC_FLAGS_BLOCKED, /* Transactions are blocked */ }; +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ + /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); @@ -109,7 +116,7 @@ struct transaction { u8 ri; u8 wlen; u8 rlen; - bool done; + u8 flags; }; struct acpi_ec *boot_ec, *first_ec; @@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ static inline u8 acpi_ec_read_status(struct acpi_ec *ec) { u8 x = inb(ec->command_addr); - pr_debug("---> status = 0x%2.2x\n", x); + pr_debug("EC_SC(R) = 0x%2.2x " + "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n", + x, + !!(x & ACPI_EC_FLAG_SCI), + !!(x & ACPI_EC_FLAG_BURST), + !!(x & ACPI_EC_FLAG_CMD), + !!(x & ACPI_EC_FLAG_IBF), + !!(x & ACPI_EC_FLAG_OBF)); return x; } static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); - pr_debug("---> data = 0x%2.2x\n", x); + 
pr_debug("EC_DATA(R) = 0x%2.2x\n", x); return x; } static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) { - pr_debug("<--- command = 0x%2.2x\n", command); + pr_debug("EC_SC(W) = 0x%2.2x\n", command); outb(command, ec->command_addr); } static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) { - pr_debug("<--- data = 0x%2.2x\n", data); + pr_debug("EC_DATA(W) = 0x%2.2x\n", data); outb(data, ec->data_addr); } -static int ec_transaction_done(struct acpi_ec *ec) +static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; spin_lock_irqsave(&ec->lock, flags); - if (!ec->curr || ec->curr->done) + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; spin_unlock_irqrestore(&ec->lock, flags); return ret; } -static void start_transaction(struct acpi_ec *ec) +static bool advance_transaction(struct acpi_ec *ec) { - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; - ec->curr->done = false; - acpi_ec_write_cmd(ec, ec->curr->command); -} - -static void advance_transaction(struct acpi_ec *ec, u8 status) -{ - unsigned long flags; struct transaction *t; + u8 status; + bool wakeup = false; - spin_lock_irqsave(&ec->lock, flags); + pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); + status = acpi_ec_read_status(ec); t = ec->curr; if (!t) - goto unlock; - if (t->wlen > t->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) - acpi_ec_write_data(ec, - t->wdata[t->wi++]); - else - goto err; - } else if (t->rlen > t->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { - t->rdata[t->ri++] = acpi_ec_read_data(ec); - if (t->rlen == t->ri) - t->done = true; + goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { + if (t->wlen > t->wi) { + if ((status & ACPI_EC_FLAG_IBF) == 0) + acpi_ec_write_data(ec, t->wdata[t->wi++]); + else + goto err; + } else if (t->rlen > t->ri) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + } else + goto err; + } else if (t->wlen == t->wi && + (status & ACPI_EC_FLAG_IBF) == 0) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + return wakeup; + } else { + if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; } else goto err; - } else if (t->wlen == t->wi && - (status & ACPI_EC_FLAG_IBF) == 0) - t->done = true; - goto unlock; + return wakeup; + } err: /* * If SCI bit is set, then don't think it's a false IRQ * otherwise will take a not handled IRQ as a false one. 
*/ - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) - ++t->irq_count; + if (!(status & ACPI_EC_FLAG_SCI)) { + if (in_interrupt() && t) + ++t->irq_count; + } + return wakeup; +} -unlock: - spin_unlock_irqrestore(&ec->lock, flags); +static void start_transaction(struct acpi_ec *ec) +{ + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; + ec->curr->flags = 0; + (void)advance_transaction(ec); } static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); @@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec) /* don't sleep with disabled interrupts */ if (EC_FLAGS_MSI || irqs_disabled()) { udelay(ACPI_EC_MSI_UDELAY); - if (ec_transaction_done(ec)) + if (ec_transaction_completed(ec)) return 0; } else { if (wait_event_timeout(ec->wait, - ec_transaction_done(ec), + ec_transaction_completed(ec), msecs_to_jiffies(1))) return 0; } - advance_transaction(ec, acpi_ec_read_status(ec)); + spin_lock_irqsave(&ec->lock, flags); + (void)advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); @@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, return ret; } -static int ec_check_ibf0(struct acpi_ec *ec) -{ - u8 status = acpi_ec_read_status(ec); - return (status & ACPI_EC_FLAG_IBF) == 0; -} - -static int ec_wait_ibf0(struct acpi_ec *ec) -{ - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - /* interrupt wait manually if GPE mode is not active */ - while (time_before(jiffies, delay)) - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), - msecs_to_jiffies(1))) - return 0; - return -ETIME; -} - static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; @@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) goto unlock; } } - if (ec_wait_ibf0(ec)) { - pr_err("input buffer is not empty, " - "aborting transaction\n"); - status = -ETIME; - goto end; - } pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", t->command, t->wdata ? 
t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ @@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } pr_debug("transaction end\n"); -end: if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *data) { + unsigned long flags; struct acpi_ec *ec = data; - u8 status = acpi_ec_read_status(ec); - pr_debug("~~~> interrupt, status:0x%02x\n", status); - - advance_transaction(ec, status); - if (ec_transaction_done(ec) && - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { + spin_lock_irqsave(&ec->lock, flags); + if (advance_transaction(ec)) wake_up(&ec->wait); - ec_check_sci(ec, acpi_ec_read_status(ec)); - } + spin_unlock_irqrestore(&ec->lock, flags); + ec_check_sci(ec, acpi_ec_read_status(ec)); return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } @@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void) /* fall through */ } - if (EC_FLAGS_SKIP_DSDT_SCAN) + if (EC_FLAGS_SKIP_DSDT_SCAN) { + kfree(saved_ec); return -ENODEV; + } /* This workaround is needed only on some broken machines, * which require early EC, but fail to provide ECDT */ @@ -1105,6 +1110,7 @@ install: } error: kfree(boot_ec); + kfree(saved_ec); boot_ec = NULL; return -ENODEV; } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 0bdacc5e26a3..2ba8f02ced36 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_MEMORY24: memory24 = &ares->data.memory24; - if (!memory24->address_length) + if (!memory24->minimum && !memory24->address_length) return false; acpi_dev_get_memresource(res, memory24->minimum, memory24->address_length, @@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_MEMORY32: memory32 = &ares->data.memory32; - if (!memory32->address_length) + if (!memory32->minimum && !memory32->address_length) return false; acpi_dev_get_memresource(res, memory32->minimum, memory32->address_length, @@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: fixed_memory32 = &ares->data.fixed_memory32; - if (!fixed_memory32->address_length) + if (!fixed_memory32->address && !fixed_memory32->address_length) return false; acpi_dev_get_memresource(res, fixed_memory32->address, fixed_memory32->address_length, @@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_IO: io = &ares->data.io; - if (!io->address_length) + if (!io->minimum && !io->address_length) return false; acpi_dev_get_ioresource(res, io->minimum, io->address_length, @@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_FIXED_IO: fixed_io = &ares->data.fixed_io; - if (!fixed_io->address_length) + if (!fixed_io->address && !fixed_io->address_length) return false; acpi_dev_get_ioresource(res, fixed_io->address, fixed_io->address_length, diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index fb9ffe9adc64..350d52a8f781 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno 
Ducrot"); MODULE_DESCRIPTION("ACPI Video Driver"); MODULE_LICENSE("GPL"); -static bool brightness_switch_enabled; +static bool brightness_switch_enabled = 1; module_param(brightness_switch_enabled, bool, 0644); /* @@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void) return use_native_backlight_dmi; } -static bool acpi_video_verify_backlight_support(void) +bool acpi_video_verify_backlight_support(void) { if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && backlight_device_registered(BACKLIGHT_RAW)) return false; return acpi_video_backlight_support(); } +EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support); /* backlight device sysfs support */ static int acpi_video_get_brightness(struct backlight_device *bd) @@ -563,6 +564,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, }, { + .callback = video_set_use_native_backlight, + .ident = "Acer TravelMate B113", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"), + }, + }, + { .callback = video_set_use_native_backlight, .ident = "HP ProBook 4340s", .matches = { @@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, + .ident = "HP ProBook 4540s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"), + }, + }, + { + .callback = video_set_use_native_backlight, .ident = "HP ProBook 2013 models", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 33e3db548a29..c42feb2bacd0 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Dell Inspiron 5737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), + }, + }, { }, }; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index dae5607e1115..4cd52a4541a9 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ /* Asmedia */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 05882e4445a6..5513296e5e2e 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -371,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, int pmp, unsigned long deadline, int (*check_ready)(struct ata_link *link)); +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); int ahci_stop_engine(struct ata_port *ap); +void ahci_start_fis_rx(struct ata_port *ap); void ahci_start_engine(struct ata_port *ap); int ahci_check_ready(struct ata_link *link); int ahci_kick_engine(struct ata_port *ap); diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 3a901520c62b..cac4360f272a 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -58,6 +58,8 @@ enum ahci_imx_type { struct imx_ahci_priv { struct platform_device *ahci_pdev; enum ahci_imx_type type; + struct clk *sata_clk; + struct clk *sata_ref_clk; struct clk *ahb_clk; struct regmap *gpr; bool no_device; @@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) 
return ret; } - ret = ahci_platform_enable_clks(hpriv); + ret = clk_prepare_enable(imxpriv->sata_ref_clk); if (ret < 0) goto disable_regulator; @@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv) !IMX6Q_GPR13_SATA_MPLL_CLK_EN); } - ahci_platform_disable_clks(hpriv); + clk_disable_unprepare(imxpriv->sata_ref_clk); if (hpriv->target_pwr) regulator_disable(hpriv->target_pwr); @@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap) writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); imx_sata_disable(hpriv); imxpriv->no_device = true; + + dev_info(ap->dev, "no device found, disabling link.\n"); + dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n"); } static int ahci_imx_softreset(struct ata_link *link, unsigned int *class, @@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev) imxpriv->no_device = false; imxpriv->first_time = true; imxpriv->type = (enum ahci_imx_type)of_id->data; + + imxpriv->sata_clk = devm_clk_get(dev, "sata"); + if (IS_ERR(imxpriv->sata_clk)) { + dev_err(dev, "can't get sata clock.\n"); + return PTR_ERR(imxpriv->sata_clk); + } + + imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref"); + if (IS_ERR(imxpriv->sata_ref_clk)) { + dev_err(dev, "can't get sata_ref clock.\n"); + return PTR_ERR(imxpriv->sata_ref_clk); + } + imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); if (IS_ERR(imxpriv->ahb_clk)) { dev_err(dev, "can't get ahb clock.\n"); @@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev) hpriv->plat_data = imxpriv; - ret = imx_sata_enable(hpriv); + ret = clk_prepare_enable(imxpriv->sata_clk); if (ret) return ret; + ret = imx_sata_enable(hpriv); + if (ret) + goto disable_clk; + /* * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, * and IP vendor specific register IMX_TIMER1MS. 
@@ -435,16 +457,24 @@ static int imx_ahci_probe(struct platform_device *pdev) ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0, 0); if (ret) - imx_sata_disable(hpriv); + goto disable_sata; + return 0; + +disable_sata: + imx_sata_disable(hpriv); +disable_clk: + clk_disable_unprepare(imxpriv->sata_clk); return ret; } static void ahci_imx_host_stop(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; + struct imx_ahci_priv *imxpriv = hpriv->plat_data; imx_sata_disable(hpriv); + clk_disable_unprepare(imxpriv->sata_clk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index ebe505c17763..b10d81ddb528 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -58,7 +58,7 @@ static int ahci_probe(struct platform_device *pdev) } if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) - hflags |= AHCI_HFLAG_NO_FBS; + hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, hflags, 0, 0); diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 042a9bb45c86..ee3a3659bd9e 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -78,6 +78,7 @@ struct xgene_ahci_context { struct ahci_host_priv *hpriv; struct device *dev; + u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/ void __iomem *csr_core; /* Core CSR address of IP */ void __iomem *csr_diag; /* Diag CSR address of IP */ void __iomem *csr_axi; /* AXI CSR address of IP */ @@ -98,20 +99,62 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx) } /** + * xgene_ahci_restart_engine - Restart the dma engine. + * @ap : ATA port of interest + * + * Restarts the dma engine inside the controller. + */ +static int xgene_ahci_restart_engine(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + + ahci_stop_engine(ap); + ahci_start_fis_rx(ap); + hpriv->start_engine(ap); + + return 0; +} + +/** + * xgene_ahci_qc_issue - Issue commands to the device + * @qc: Command to issue + * + * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot + * clear the BSY bit after receiving the PIO setup FIS. This results in the dma + * state machine goes into the CMFatalErrorUpdate state and locks up. By + * restarting the dma engine, it removes the controller out of lock up state. + */ +static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + struct xgene_ahci_context *ctx = hpriv->plat_data; + int rc = 0; + + if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA)) + xgene_ahci_restart_engine(ap); + + rc = ahci_qc_issue(qc); + + /* Save the last command issued */ + ctx->last_cmd[ap->port_no] = qc->tf.command; + + return rc; +} + +/** * xgene_ahci_read_id - Read ID data from the specified device * @dev: device * @tf: proposed taskfile * @id: data buffer * * This custom read ID function is required due to the fact that the HW - * does not support DEVSLP and the controller state machine may get stuck - * after processing the ID query command. + * does not support DEVSLP. 
*/ static unsigned int xgene_ahci_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id) { u32 err_mask; - void __iomem *port_mmio = ahci_port_base(dev->link->ap); err_mask = ata_do_dev_read_id(dev, tf, id); if (err_mask) @@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev, */ id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); - /* - * Due to HW errata, restart the port if no other command active. - * Otherwise the controller may get stuck. - */ - if (!readl(port_mmio + PORT_CMD_ISSUE)) { - writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* Force a barrier */ - writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* Force a barrier */ - } return 0; } @@ -300,6 +333,7 @@ static struct ata_port_operations xgene_ahci_ops = { .host_stop = xgene_ahci_host_stop, .hardreset = xgene_ahci_hardreset, .read_id = xgene_ahci_read_id, + .qc_issue = xgene_ahci_qc_issue, }; static const struct ata_port_info xgene_ahci_port_info = { diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 40ea583d3610..d72ce0470309 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int ahci_port_start(struct ata_port *ap); static void ahci_port_stop(struct ata_port *ap); @@ -620,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap) } EXPORT_SYMBOL_GPL(ahci_stop_engine); -static void ahci_start_fis_rx(struct ata_port *ap) +void ahci_start_fis_rx(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); struct ahci_host_priv *hpriv = ap->host->private_data; @@ -646,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap) /* flush */ readl(port_mmio + PORT_CMD); } +EXPORT_SYMBOL_GPL(ahci_start_fis_rx); static int ahci_stop_fis_rx(struct ata_port *ap) { @@ -1945,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance) } EXPORT_SYMBOL_GPL(ahci_interrupt); -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *port_mmio = ahci_port_base(ap); @@ -1974,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) return 0; } +EXPORT_SYMBOL_GPL(ahci_qc_issue); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) { diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 3a5b4ed25a4f..b0077589f065 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) if (IS_ERR(hpriv->phy)) { rc = PTR_ERR(hpriv->phy); switch (rc) { - case -ENODEV: case -ENOSYS: + /* No PHY support. Check if PHY is required. 
*/ + if (of_find_property(dev->of_node, "phys", NULL)) { + dev_err(dev, "couldn't get sata-phy: ENOSYS\n"); + goto err_out; + } + case -ENODEV: /* continue normally */ hpriv->phy = NULL; break; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 18d97d5c7d90..677c0c1b03bd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) * ata_qc_new - Request an available ATA command, for queueing * @ap: target port * + * Some ATA host controllers may implement a queue depth which is less + * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond + * the hardware limitation. + * * LOCKING: * None. */ @@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) { struct ata_queued_cmd *qc = NULL; + unsigned int max_queue = ap->host->n_tags; unsigned int i, tag; /* no command while frozen */ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) return NULL; - for (i = 0; i < ATA_MAX_QUEUE; i++) { - tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; + for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { + tag = tag < max_queue ? tag : 0; /* the last tag is reserved for internal command. */ if (tag == ATA_TAG_INTERNAL) @@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, { spin_lock_init(&host->lock); mutex_init(&host->eh_mutex); + host->n_tags = ATA_MAX_QUEUE - 1; host->dev = dev; host->ops = ops; } @@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) { int i, rc; + host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); + /* host must have been started */ if (!(host->flags & ATA_HOST_STARTED)) { dev_err(host->dev, "BUG: trying to register unstarted host\n"); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 6760fc4e85b8..dad83df555c4 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; - if (err & ATA_UNC) + if (err & (ATA_UNC | ATA_AMNF)) qc->err_mask |= AC_ERR_MEDIA; if (err & ATA_IDNF) qc->err_mask |= AC_ERR_INVALID; @@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link) } if (cmd->command != ATA_CMD_PACKET && - (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | - ATA_ABORTED))) - ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", + (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | + ATA_IDNF | ATA_ABORTED))) + ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", res->feature & ATA_ICRC ? "ICRC " : "", res->feature & ATA_UNC ? "UNC " : "", + res->feature & ATA_AMNF ? "AMNF " : "", res->feature & ATA_IDNF ? "IDNF " : "", res->feature & ATA_ABORTED ? 
"ABRT " : ""); #endif diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index 6ad5c072ce34..4d37c5415fc7 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev) struct ep93xx_pata_data *drv_data; struct ata_host *host; struct ata_port *ap; - unsigned int irq; + int irq; struct resource *mem_res; void __iomem *ide_base; int err; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e9227e1762d..eee48c49f5de 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) return dev->archdata.irqs[num]; #else struct resource *r; - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) - return of_irq_get(dev->dev.of_node, num); + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get(dev->dev.of_node, num); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } r = platform_get_resource(dev, IORESOURCE_IRQ, num); @@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name) { struct resource *r; - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) - return of_irq_get_byname(dev->dev.of_node, name); + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get_byname(dev->dev.of_node, name); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? r->start : -ENXIO; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1b35c45c92b7..3f2e16738080 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection) struct task_struct *opa; kref_get(&connection->kref); + /* We may just have force_sig()'ed this thread + * to get it out of some blocking network function. + * Clear signals; otherwise kthread_run(), which internally uses + * wait_on_completion_killable(), will mistake our pending signal + * for a new fatal signal and fail. */ + flush_signals(current); opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); if (IS_ERR(opa)) { drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 48eccb350180..36e54be402df 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -624,7 +624,16 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity) zram->disksize = 0; if (reset_capacity) set_capacity(zram->disk, 0); + up_write(&zram->init_lock); + + /* + * Revalidate disk out of the init_lock to avoid lockdep splat. + * It's okay because disk's capacity is protected by init_lock + * so that revalidate_disk always sees up-to-date capacity. + */ + if (reset_capacity) + revalidate_disk(zram->disk); } static ssize_t disksize_store(struct device *dev, @@ -665,6 +674,14 @@ static ssize_t disksize_store(struct device *dev, zram->disksize = disksize; set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); up_write(&zram->init_lock); + + /* + * Revalidate disk out of the init_lock to avoid lockdep splat. + * It's okay because disk's capacity is protected by init_lock + * so that revalidate_disk always sees up-to-date capacity. 
+ */ + revalidate_disk(zram->disk); + return len; out_destroy_comp: diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index f98380648cb3..f50dffc0374f 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, - { USB_DEVICE(0x0CF3, 0x3005) }, { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x311E) }, @@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a1c80b0c7663..6250fc2fb93a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 04680ead9275..fede8ca7147c 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { BT_ERR("Non-link packet received in non-active state"); h5_reset_rx(h5); + return 0; } h5->rx_func = h5_rx_payload; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 334601cc81cf..c4419ea1ab07 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex); static int data_avail; static u8 *rng_buffer; +static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, + int wait); + static size_t rng_buffer_size(void) { return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; } +static void add_early_randomness(struct hwrng *rng) +{ + unsigned char bytes[16]; + int bytes_read; + + /* + * Currently only virtio-rng cannot return data during device + * probe, and that's handled in virtio-rng.c itself. If there + * are more such devices, this call to rng_get_data can be + * made conditional here instead of doing it per-device. 
+ */ + bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + if (bytes_read > 0) + add_device_randomness(bytes, bytes_read); +} + static inline int hwrng_init(struct hwrng *rng) { - if (!rng->init) - return 0; - return rng->init(rng); + if (rng->init) { + int ret; + + ret = rng->init(rng); + if (ret) + return ret; + } + add_early_randomness(rng); + return 0; } static inline void hwrng_cleanup(struct hwrng *rng) @@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *old_rng, *tmp; - unsigned char bytes[16]; - int bytes_read; if (rng->name == NULL || (rng->data_read == NULL && rng->read == NULL)) @@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng) INIT_LIST_HEAD(&rng->list); list_add_tail(&rng->list, &rng_list); - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); - if (bytes_read > 0) - add_device_randomness(bytes, bytes_read); + if (old_rng && !rng->init) { + /* + * Use a new device's input to add some randomness to + * the system. If this rng device isn't going to be + * used right away, its init function hasn't been + * called yet; so only use the randomness from devices + * that don't need an init callback. + */ + add_early_randomness(rng); + } + out_unlock: mutex_unlock(&rng_mutex); out: diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index f3e71501de54..e9b15bc18b4d 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -38,6 +38,8 @@ struct virtrng_info { int index; }; +static bool probe_done; + static void random_recv_done(struct virtqueue *vq) { struct virtrng_info *vi = vq->vdev->priv; @@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; + /* + * Don't ask host for data till we're setup. This call can + * happen during hwrng_register(), after commit d9e7972619. 
+ */ + if (unlikely(!probe_done)) + return 0; + if (!vi->busy) { vi->busy = true; init_completion(&vi->have_data); @@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev) return err; } + probe_done = true; return 0; } diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index d915707d2ba1..93dcad0c1cbe 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs) if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) return -ENOMEM; cpumask_copy(old_mask, ¤t->cpus_allowed); - set_cpus_allowed_ptr(current, cpumask_of(0)); + rc = set_cpus_allowed_ptr(current, cpumask_of(0)); + if (rc) + goto out; if (smp_processor_id() != 0) { rc = -EBUSY; goto out; diff --git a/drivers/char/random.c b/drivers/char/random.c index 0a7ac0a7b252..71529e196b84 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -641,7 +641,7 @@ retry: } while (unlikely(entropy_count < pool_size-2 && pnfrac)); } - if (entropy_count < 0) { + if (unlikely(entropy_count < 0)) { pr_warn("random: negative entropy/overflow: pool %s count %d\n", r->name, entropy_count); WARN_ON(1); @@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, int reserved) { int entropy_count, orig; - size_t ibytes; + size_t ibytes, nfrac; BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); @@ -999,7 +999,17 @@ retry: } if (ibytes < min) ibytes = 0; - if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) + + if (unlikely(entropy_count < 0)) { + pr_warn("random: negative entropy count: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); + entropy_count = 0; + } + nfrac = ibytes << (ENTROPY_SHIFT + 3); + if ((size_t) entropy_count > nfrac) + entropy_count -= nfrac; + else entropy_count = 0; if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) @@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) "with %d bits of entropy available\n", current->comm, nonblocking_pool.entropy_total); + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 9b7b5859a420..3757e9e72d37 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev) goto err_reg; } - s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, - sizeof(struct clk_lookup), GFP_KERNEL); + s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, + s2mps11_name(s2mps11_clk), NULL); if (!s2mps11_clk->lookup) { ret = -ENOMEM; goto err_lup; } - s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk); - s2mps11_clk->lookup->clk = s2mps11_clk->clk; - clkdev_add(s2mps11_clk->lookup); } diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c index 12f3c0b64fcd..4c449b3170f6 100644 --- a/drivers/clk/qcom/mmcc-msm8960.c +++ b/drivers/clk/qcom/mmcc-msm8960.c @@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = { static u8 mmcc_pxo_hdmi_map[] = { [P_PXO] = 0, - [P_HDMI_PLL] = 2, + [P_HDMI_PLL] = 3, }; static const char *mmcc_pxo_hdmi[] = { diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 4f150c9dd38c..7f4a473a7ad7 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { 
GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0), GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0), - GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp", - E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre", - E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre", - E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp", - E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0), - GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp", + GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp", E4X12_GATE_IP_ISP, 0, 0, 0), - GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp", + GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre", E4X12_GATE_IP_ISP, 1, 0, 0), - GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp", + GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre", E4X12_GATE_IP_ISP, 2, 0, 0), - GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp", + GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp", E4X12_GATE_IP_ISP, 3, 0, 0), GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0), GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2, diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 1fad4c5e3f5d..184f64293b26 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0), GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0), GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub", - GATE_IP_DISP1, 2, 0, 0), + GATE_IP_DISP1, 9, 0, 0), GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 8, 0, 0), GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0), diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 9d7d7eed03fd..a4e6cc782e5c 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = { SRC_TOP4, 16, 1), MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1), MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1), - MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1), + MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p, + SRC_TOP4, 28, 1), MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p, SRC_TOP5, 0, 1), @@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = { SRC_TOP11, 12, 1), MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1), MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1), - MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1), + MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p, + SRC_TOP11, 28, 1), MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p, SRC_TOP12, 4, 1), @@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric", - GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_isp", "mout_user_aclk266_isp", GATE_BUS_TOP, 13, 0, 0), GATE(0, 
"aclk166", "mout_user_aclk166", @@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), /* PERIC Block */ - GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0), - GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0), - GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0), - GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0), - GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0), - GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0), - GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0), - GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0), - GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0), - GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0), - GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0), - GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0), - GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0), - GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0), - GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0), - GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0), - GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0), - GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0), - GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0), - GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0), - GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0), - GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0), - GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0), - GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0), - GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0), - GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0), - - GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0), + GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric", + GATE_IP_PERIC, 0, 0, 0), + GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric", + GATE_IP_PERIC, 1, 0, 0), + GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 2, 0, 0), + GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric", + GATE_IP_PERIC, 3, 0, 0), + GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric", + GATE_IP_PERIC, 6, 0, 0), + GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric", + GATE_IP_PERIC, 7, 0, 0), + GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 8, 0, 0), + GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric", + GATE_IP_PERIC, 9, 0, 0), + GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric", + GATE_IP_PERIC, 10, 0, 0), + GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric", + GATE_IP_PERIC, 11, 0, 0), + GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 12, 0, 0), + GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric", + GATE_IP_PERIC, 13, 0, 0), + GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric", + GATE_IP_PERIC, 14, 0, 0), + GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric", + GATE_IP_PERIC, 15, 0, 0), + GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric", + GATE_IP_PERIC, 16, 0, 0), + GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric", + GATE_IP_PERIC, 17, 0, 0), + GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 18, 0, 0), + GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric", + GATE_IP_PERIC, 20, 0, 0), + GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 21, 0, 0), + GATE(CLK_PCM1, "pcm1", 
"mout_user_aclk66_peric", + GATE_IP_PERIC, 22, 0, 0), + GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric", + GATE_IP_PERIC, 23, 0, 0), + GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric", + GATE_IP_PERIC, 24, 0, 0), + GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric", + GATE_IP_PERIC, 26, 0, 0), + GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric", + GATE_IP_PERIC, 28, 0, 0), + GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric", + GATE_IP_PERIC, 30, 0, 0), + GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric", + GATE_IP_PERIC, 31, 0, 0), + + GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric", + GATE_BUS_PERIC, 22, 0, 0), /* PERIS Block */ GATE(CLK_CHIPID, "chipid", "aclk66_psgen", diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index ba0716801db2..140f4733c02e 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = { ALIAS(HCLK, NULL, "hclk"), ALIAS(MPLL, NULL, "mpll"), ALIAS(FCLK, NULL, "fclk"), + ALIAS(PCLK, NULL, "watchdog"), + ALIAS(PCLK_SDI, NULL, "sdi"), + ALIAS(HCLK_NAND, NULL, "nand"), + ALIAS(PCLK_I2S, NULL, "iis"), + ALIAS(PCLK_I2C, NULL, "i2c"), }; /* S3C2410 specific clocks */ @@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, if (!np) s3c2410_common_clk_register_fixed_ext(ctx, xti_f); - if (current_soc == 2410) { + if (current_soc == S3C2410) { if (_get_rate("xti") == 12 * MHZ) { s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl; s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl; @@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor, ARRAY_SIZE(s3c2410_ffactor)); samsung_clk_register_alias(ctx, s3c2410_aliases, - ARRAY_SIZE(s3c2410_common_aliases)); + ARRAY_SIZE(s3c2410_aliases)); break; case S3C2440: samsung_clk_register_mux(ctx, s3c2440_muxes, diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index efa16ee592c8..8889ff1c10fc 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = { ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"), ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"), ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"), - ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"), - ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"), + ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"), + ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"), + ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"), + ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"), ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"), ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"), ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"), diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index c2d204315546..bb5f387774e2 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 @@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk", "ras_syn0_gclk", }; static const char 
*uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; -static void __init spear320_clk_init(void __iomem *soc_config_base) +static void __init spear320_clk_init(void __iomem *soc_config_base, + struct clk *ras_apb_clk) { struct clk *clk; @@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base) SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a3000000.serial"); + /* Enforce ras_apb_clk */ + clk_set_parent(clk, ras_apb_clk); clk = clk_register_mux(NULL, "uart2_clk", uartx_parents, ARRAY_SIZE(uartx_parents), @@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base) SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a4000000.serial"); + /* Enforce ras_apb_clk */ + clk_set_parent(clk, ras_apb_clk); clk = clk_register_mux(NULL, "uart3_clk", uartx_parents, ARRAY_SIZE(uartx_parents), @@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base) clk_register_clkdev(clk, NULL, "60100000.serial"); } #else -static inline void spear320_clk_init(void __iomem *soc_config_base) { } +static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { } #endif void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) { - struct clk *clk, *clk1; + struct clk *clk, *clk1, *ras_apb_clk; clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT, 32000); @@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB, RAS_APB_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_apb_clk", NULL); + ras_apb_clk = clk; clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0, RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock); @@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_ else if (of_machine_is_compatible("st,spear310")) spear310_clk_init(); else if (of_machine_is_compatible("st,spear320")) - spear320_clk_init(soc_config_base); + spear320_clk_init(soc_config_base, ras_apb_clk); } diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c index 44cd27c5c401..670f90d629d7 100644 --- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c +++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c @@ -29,7 +29,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev) r = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(&pdev->dev, r); - if (!reg) + if (IS_ERR(reg)) return PTR_ERR(reg); clk_parent = of_clk_get_parent_name(np, 0); diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 5428c9c547cd..72d97279eae1 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw) if (i == MAX_APLL_WAIT_TRIES) { pr_warn("clock: %s failed transition to '%s'\n", clk_name, (state) ? "locked" : "bypassed"); - } else { + r = -EBUSY; + } else pr_debug("clock: %s transition to '%s' in %d loops\n", clk_name, (state) ? 
"locked" : "bypassed", i); - r = 0; - } - return r; } @@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) const char *parent_name; u32 val; - ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL); + ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); init = kzalloc(sizeof(*init), GFP_KERNEL); diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index abd956d5f838..79791e1bf282 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -161,7 +161,8 @@ cleanup: } #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ - defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) + defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \ + defined(CONFIG_SOC_AM43XX) /** * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock * @node: device node for this clock @@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock", of_ti_omap4_dpll_x2_setup); #endif -#ifdef CONFIG_SOC_AM33XX +#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) { ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL); diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 0197a478720c..e9d650e51287 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node) u8 clk_mux_flags = 0; u32 mask = 0; u32 shift = 0; - u32 flags = 0; + u32 flags = CLK_SET_RATE_NO_REPARENT; num_parents = of_clk_get_parent_count(node); if (num_parents < 2) { diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index f71d55f5e6e5..ab51bf20a3ed 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -162,7 +162,7 @@ static void exynos4_mct_frc_start(void) exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); } -static cycle_t exynos4_frc_read(struct clocksource *cs) +static cycle_t notrace _exynos4_frc_read(void) { unsigned int lo, hi; u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); @@ -176,6 +176,11 @@ static cycle_t exynos4_frc_read(struct clocksource *cs) return ((cycle_t)hi << 32) | lo; } +static cycle_t exynos4_frc_read(struct clocksource *cs) +{ + return _exynos4_frc_read(); +} + static void exynos4_frc_resume(struct clocksource *cs) { exynos4_mct_frc_start(); @@ -192,13 +197,24 @@ struct clocksource mct_frc = { static u64 notrace exynos4_read_sched_clock(void) { - return exynos4_frc_read(&mct_frc); + return _exynos4_frc_read(); +} + +static struct delay_timer exynos4_delay_timer; + +static cycles_t exynos4_read_current_timer(void) +{ + return _exynos4_frc_read(); } static void __init exynos4_clocksource_init(void) { exynos4_mct_frc_start(); + exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer; + exynos4_delay_timer.freq = clk_rate; + register_current_timer_delay(&exynos4_delay_timer); + if (clocksource_register_hz(&mct_frc, clk_rate)) panic("%s: can't register clocksource\n", mct_frc.name); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index ebac67115009..7364a538e056 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC depends on REGULATOR_ANATOP + select PM_OPP help This adds cpufreq driver support for Freescale i.MX6 series SoCs. @@ -118,7 +119,7 @@ config ARM_INTEGRATOR If in doubt, say Y. 
config ARM_KIRKWOOD_CPUFREQ - def_bool MACH_KIRKWOOD + def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD help This adds the CPUFreq driver for Marvell Kirkwood SoCs. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 738c8b7b17dc..db6d9a2fea4d 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index ee1ae303a07c..86beda9f950b 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_put_reg; } - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("failed to init OPP table: %d\n", ret); - goto out_put_clk; - } + /* OPPs might be populated at runtime, don't check for error here */ + of_init_opp_table(cpu_dev); ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 62259d27f03e..6f024852c6fb 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) * the creation of a brand new one. So we need to perform this update * by invoking update_policy_cpu(). */ - if (recover_policy && cpu != policy->cpu) + if (recover_policy && cpu != policy->cpu) { update_policy_cpu(policy, cpu); - else + WARN_ON(kobject_move(&policy->kobj, &dev->kobj)); + } else { policy->cpu = cpu; + } cpumask_copy(policy->cpus, cpumask_of(cpu)); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 924bb2d42b1c..86631cb6f7de 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs; struct perf_limits { int no_turbo; + int turbo_disabled; int max_perf_pct; int min_perf_pct; int32_t max_perf; @@ -287,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; limits.no_turbo = clamp_t(int, input, 0 , 1); - + if (limits.turbo_disabled) { + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + limits.no_turbo = limits.turbo_disabled; + } return count; } @@ -357,21 +361,21 @@ static int byt_get_min_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 8) & 0x3F; + return (value >> 8) & 0x7F; } static int byt_get_max_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 16) & 0x3F; + return (value >> 16) & 0x7F; } static int byt_get_turbo_pstate(void) { u64 value; rdmsrl(BYT_TURBO_RATIOS, value); - return value & 0x3F; + return value & 0x7F; } static void byt_set_pstate(struct cpudata *cpudata, int pstate) @@ -381,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) u32 vid; val = pstate << 8; - if (limits.no_turbo) + if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -405,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata) rdmsrl(BYT_VIDS, value); - cpudata->vid.min = int_tofp((value 
>> 8) & 0x3f); - cpudata->vid.max = int_tofp((value >> 16) & 0x3f); + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( cpudata->vid.max - cpudata->vid.min, int_tofp(cpudata->pstate.max_pstate - @@ -448,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) u64 val; val = pstate << 8; - if (limits.no_turbo) + if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); @@ -696,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; - intel_pstate_get_cpu_pstates(cpu); - cpu->cpu = cpunum; + intel_pstate_get_cpu_pstates(cpu); init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; @@ -741,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); - limits.no_turbo = 0; + limits.no_turbo = limits.turbo_disabled; return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; @@ -784,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) { struct cpudata *cpu; int rc; + u64 misc_en; rc = intel_pstate_init_cpu(policy->cpu); if (rc) @@ -791,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (!limits.no_turbo && - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { + limits.turbo_disabled = 1; + limits.no_turbo = 1; + } + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 546376719d8f..b5befc211172 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void) name = "K4S641632D"; if (machine_is_h3100()) name = "KM416S4030CT"; - if (machine_is_jornada720()) + if (machine_is_jornada720() || machine_is_h3600()) name = "K4S281632B-1H"; if (machine_is_nanoengine()) name = "MT48LC8M16A2TG-75"; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1d80bd3636c5..b512a4ba7569 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev) int error; jrdev = &pdev->dev; - jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), - GFP_KERNEL); + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), + GFP_KERNEL); if (!jrpriv) return -ENOMEM; @@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev) /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ - if (error) { - kfree(jrpriv); + if (error) return error; - } jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index d028f36ae655..8f8b0b608875 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -86,6 +86,9 @@ #define USBSS_IRQ_PD_COMP (1 << 2) +/* Packet Descriptor */ +#define PD2_ZERO_LENGTH (1 << 19) + struct cppi41_channel { struct dma_chan chan; struct dma_async_tx_descriptor txd; @@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) 
__iormb(); while (val) { - u32 desc; + u32 desc, len; q_num = __fls(val); val &= ~(1 << q_num); @@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data) q_num, desc); continue; } - c->residue = pd_trans_len(c->desc->pd6) - - pd_trans_len(c->desc->pd0); + if (c->desc->pd2 & PD2_ZERO_LENGTH) + len = 0; + else + len = pd_trans_len(c->desc->pd0); + + c->residue = pd_trans_len(c->desc->pd6) - len; dma_cookie_complete(&c->txd); c->txd.callback(c->txd.callback_param); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 128714622bf5..14867e3ac8ff 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -255,6 +255,7 @@ struct sdma_channel { enum dma_slave_buswidth word_size; unsigned int buf_tail; unsigned int num_bd; + unsigned int period_len; struct sdma_buffer_descriptor *bd; dma_addr_t bd_phys; unsigned int pc_from_device, pc_to_device; @@ -593,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) static void sdma_handle_channel_loop(struct sdma_channel *sdmac) { + if (sdmac->desc.callback) + sdmac->desc.callback(sdmac->desc.callback_param); +} + +static void sdma_update_channel_loop(struct sdma_channel *sdmac) +{ struct sdma_buffer_descriptor *bd; /* @@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) bd->mode.status |= BD_DONE; sdmac->buf_tail++; sdmac->buf_tail %= sdmac->num_bd; - - if (sdmac->desc.callback) - sdmac->desc.callback(sdmac->desc.callback_param); } } @@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) int channel = fls(stat) - 1; struct sdma_channel *sdmac = &sdma->channel[channel]; + if (sdmac->flags & IMX_DMA_SG_LOOP) + sdma_update_channel_loop(sdmac); + tasklet_schedule(&sdmac->tasklet); __clear_bit(channel, &stat); @@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( sdmac->status = DMA_IN_PROGRESS; sdmac->buf_tail = 0; + sdmac->period_len = period_len; sdmac->flags |= IMX_DMA_SG_LOOP; sdmac->direction = direction; @@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct sdma_channel *sdmac = to_sdma_chan(chan); + u32 residue; + + if (sdmac->flags & IMX_DMA_SG_LOOP) + residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len; + else + residue = sdmac->chn_count - sdmac->chn_real_count; dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, - sdmac->chn_count - sdmac->chn_real_count); + residue); return sdmac->status; } diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 4199849e3758..145974f9662b 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -1,4 +1,5 @@ menu "IEEE 1394 (FireWire) support" + depends on HAS_DMA depends on PCI || COMPILE_TEST # firewire-core does not depend on PCI but is # not useful without PCI controller driver diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 57985410f12f..a66a3217f1d9 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -336,10 +336,10 @@ static const struct { QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, - QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, + QUIRK_CYCLE_TIMER /* FIXME: necessary? 
*/ | QUIRK_NO_MSI}, {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, - 0}, + QUIRK_NO_MSI}, {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index eff1a2f22f09..dc79346689e6 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -346,6 +346,7 @@ static __initdata struct { struct param_info { int verbose; + int found; void *params; }; @@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; - pr_info("Getting parameters from FDT:\n"); - for (i = 0; i < ARRAY_SIZE(dt_params); i++) { prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); - if (!prop) { - pr_err("Can't find %s in device tree!\n", - dt_params[i].name); + if (!prop) return 0; - } dest = info->params + dt_params[i].offset; + info->found++; val = of_read_number(prop, len / sizeof(u32)); @@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) { struct param_info info; + int ret; + + pr_info("Getting EFI parameters from FDT:\n"); info.verbose = verbose; + info.found = 0; info.params = params; - return of_scan_flat_dt(fdt_find_uefi_params, &info); + ret = of_scan_flat_dt(fdt_find_uefi_params, &info); + if (!info.found) + pr_info("UEFI not found.\n"); + else if (!ret) + pr_err("Can't find '%s' in device tree!\n", + dt_params[info.found].name); + + return ret; } #endif /* CONFIG_EFI_PARAMS_FROM_FDT */ diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c index 82d774161cc9..507a3df46a5d 100644 --- a/drivers/firmware/efi/fdt.c +++ b/drivers/firmware/efi/fdt.c @@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, u32 fdt_val32; u64 fdt_val64; - /* - * Copy definition of linux_banner here. Since this code is - * built as part of the decompressor for ARM v7, pulling - * in version.c where linux_banner is defined for the - * kernel brings other kernel dependencies with it. 
- */ - const char linux_banner[] = - "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" - LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; - /* Do some checks on provided FDT, if it exists*/ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index fe7c0e211f9a..57adbc90fdad 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi) if (spi_present_mask & (1 << addr)) chips++; } - if (!chips) - return -ENODEV; } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); @@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi) if (!(spi_present_mask & (1 << addr))) continue; chips--; - if (chips < 0) { - dev_err(&spi->dev, "FATAL: invalid negative chip id\n"); - goto fail; - } data->mcp[addr] = &data->chip[chips]; status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 0x40 | (addr << 1), type, base, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 0c9f803fc1ac..b6ae89ea8811 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq, static struct irq_domain_ops gpio_rcar_irq_domain_ops = { .map = gpio_rcar_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; struct gpio_rcar_info { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8218078b6133..8218078b6133 100755..100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 240c331405b9..ac357b02bd35 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -810,6 +810,12 @@ static int tda998x_encoder_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { + if (mode->clock > 150000) + return MODE_CLOCK_HIGH; + if (mode->htotal >= BIT(13)) + return MODE_BAD_HVALUE; + if (mode->vtotal >= BIT(11)) + return MODE_BAD_VVALUE; return MODE_OK; } @@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) return i; } } else { - for (i = 10; i > 0; i--) { - msleep(10); + for (i = 100; i > 0; i--) { + msleep(1); ret = reg_read(priv, REG_INT_FLAGS_2); if (ret < 0) return ret; @@ -1183,7 +1189,6 @@ static void tda998x_encoder_destroy(struct drm_encoder *encoder) { struct tda998x_priv *priv = to_tda998x_priv(encoder); - drm_i2c_encoder_destroy(encoder); /* disable all IRQs and free the IRQ handler */ cec_write(priv, REG_CEC_RXSHPDINTENA, 0); @@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) if (priv->cec) i2c_unregister_device(priv->cec); + drm_i2c_encoder_destroy(encoder); kfree(priv); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6c656392d67d..d44344140627 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1464,12 +1464,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) #else static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) { - int ret; + int ret = 0; DRM_INFO("Replacing VGA console driver\n"); console_lock(); - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); if (ret == 0) { ret = do_unregister_con_driver(&vga_con); diff --git 
a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a47fbf60b781..374f964323ad 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -656,6 +656,7 @@ enum intel_sbi_destination { #define QUIRK_PIPEA_FORCE (1<<0) #define QUIRK_LVDS_SSC_DISABLE (1<<1) #define QUIRK_INVERT_BRIGHTNESS (1<<2) +#define QUIRK_BACKLIGHT_PRESENT (1<<3) struct intel_fbdev; struct intel_fbc_work; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f36126383d26..d893e4da5dce 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1616,22 +1616,6 @@ out: return ret; } -void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) -{ - struct i915_vma *vma; - - /* - * Only the global gtt is relevant for gtt memory mappings, so restrict - * list traversal to objects bound into the global address space. Note - * that the active list should be empty, but better safe than sorry. - */ - WARN_ON(!list_empty(&dev_priv->gtt.base.active_list)); - list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list) - i915_gem_release_mmap(vma->obj); - list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list) - i915_gem_release_mmap(vma->obj); -} - /** * i915_gem_release_mmap - remove physical page mappings * @obj: obj in question @@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) obj->fault_mappable = false; } +void +i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + i915_gem_release_mmap(obj); +} + uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) { diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3521f998a178..34894b573064 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -31,7 +31,7 @@ struct i915_render_state { struct drm_i915_gem_object *obj; unsigned long ggtt_offset; - void *batch; + u32 *batch; u32 size; u32 len; }; @@ -80,7 +80,7 @@ free: static void render_state_free(struct i915_render_state *so) { - kunmap(so->batch); + kunmap(kmap_to_page(so->batch)); i915_gem_object_ggtt_unpin(so->obj); drm_gem_object_unreference(&so->obj->base); kfree(so); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 62ef55ba061c..7465ab0fd396 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) if (base == 0) return 0; + /* make sure we don't clobber the GTT if it's within stolen memory */ + if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) { + struct { + u32 start, end; + } stolen[2] = { + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + }; + u64 gtt_start, gtt_end; + + gtt_start = I915_READ(PGTBL_CTL); + if (IS_GEN4(dev)) + gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | + (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; + else + gtt_start &= PGTBL_ADDRESS_LO_MASK; + gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; + + if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) + stolen[0].end = gtt_start; + if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) + stolen[1].start = gtt_end; + + /* pick the larger of the two chunks 
*/ + if (stolen[0].end - stolen[0].start > + stolen[1].end - stolen[1].start) { + base = stolen[0].start; + dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; + } else { + base = stolen[1].start; + dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; + } + + if (stolen[0].start != stolen[1].start || + stolen[0].end != stolen[1].end) { + DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", + (unsigned long long) gtt_start, + (unsigned long long) gtt_end - 1); + DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", + base, base + (u32) dev_priv->gtt.stolen_size - 1); + } + } + + /* Verify that nothing else uses this physical address. Stolen * memory should be reserved by the BIOS and hidden from the * kernel. So if the region is already marked as busy, something diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 267f069765ad..c05c84f3f091 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_engine_cs *signaller; - u32 seqno, ctl; + u32 seqno; ring->hangcheck.deadlock++; @@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring) if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) return -1; - /* cursory check for an unkickable deadlock */ - ctl = I915_READ_CTL(signaller); - if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) - return -1; - if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) return 1; - if (signaller->hangcheck.deadlock) + /* cursory check for an unkickable deadlock */ + if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && + semaphore_passed(signaller) < 0) return -1; return 0; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e691b30b2817..a5bab61bfc00 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -942,6 +942,9 @@ enum punit_power_well { /* * Instruction and interrupt control regs */ +#define PGTBL_CTL 0x02020 +#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ +#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ #define PGTBL_ER 0x02024 #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5f285fba4e41..f0be855ddf45 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv, static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, enum plane plane, enum pipe pipe) { + struct drm_device *dev = dev_priv->dev; struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); int reg; @@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); intel_flush_primary_plane(dev_priv, plane); + + /* + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( + */ + if (IS_BROADWELL(dev)) + intel_wait_for_vblank(dev, intel_crtc->pipe); } /** @@ -11088,6 +11097,22 @@ const char *intel_output_name(int output) return names[output]; } +static bool intel_crt_present(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_ULT(dev)) + 
return false; + + if (IS_CHERRYVIEW(dev)) + return false; + + if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) + return false; + + return true; +} + static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -11096,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_lvds_init(dev); - if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support) + if (intel_crt_present(dev)) intel_crt_init(dev); if (HAS_DDI(dev)) { @@ -11566,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev) DRM_INFO("applying inverted panel brightness quirk\n"); } +/* Some VBT's incorrectly indicate no backlight is present */ +static void quirk_backlight_present(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; + DRM_INFO("applying backlight present quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -11634,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = { /* Acer Aspire 5336 */ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, + + /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ + { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, + + /* Toshiba CB35 Chromebook (Celeron 2955U) */ + { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, + + /* HP Chromebook 14 (Celeron 2955U) */ + { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, }; static void intel_init_quirks(struct drm_device *dev) @@ -11872,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * ... */ plane = crtc->plane; crtc->plane = !plane; + crtc->primary_enabled = true; dev_priv->display.crtc_disable(&crtc->base); crtc->plane = plane; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 52fda950fd2a..8a1a4fbc06ac 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -28,6 +28,8 @@ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/notifier.h> +#include <linux/reboot.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp) return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); } +/* Reboot notifier handler to shutdown panel power to guarantee T12 timing + This function only applicable when panel PM state is not to be tracked */ +static int edp_notify_handler(struct notifier_block *this, unsigned long code, + void *unused) +{ + struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), + edp_notifier); + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_div; + u32 pp_ctrl_reg, pp_div_reg; + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + + if (!is_edp(intel_dp) || code != SYS_RESTART) + return 0; + + if (IS_VALLEYVIEW(dev)) { + pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); + pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); + pp_div = I915_READ(pp_div_reg); + pp_div &= PP_REFERENCE_DIVIDER_MASK; + + /* 0x1F write to PP_DIV_REG sets max cycle delay */ + I915_WRITE(pp_div_reg, pp_div | 0x1F); + I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF); + msleep(intel_dp->panel_power_cycle_delay); + } + + return 0; +} + static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, mode_rate = 
intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = min_clock; clock <= max_clock; clock++) { + for (clock = min_clock; clock <= max_clock; clock++) { + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); edp_panel_vdd_off_sync(intel_dp); drm_modeset_unlock(&dev->mode_config.connection_mutex); + if (intel_dp->edp_notifier.notifier_call) { + unregister_reboot_notifier(&intel_dp->edp_notifier); + intel_dp->edp_notifier.notifier_call = NULL; + } } kfree(intel_dig_port); } @@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } mutex_unlock(&dev->mode_config.mutex); + if (IS_VALLEYVIEW(dev)) { + intel_dp->edp_notifier.notifier_call = edp_notify_handler; + register_reboot_notifier(&intel_dp->edp_notifier); + } + intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_panel_setup_backlight(connector); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index eaa27ee9e367..f67340ed2c12 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -538,6 +538,8 @@ struct intel_dp { unsigned long last_power_on; unsigned long last_backlight_off; bool psr_setup_done; + struct notifier_block edp_notifier; + bool use_tps3; struct intel_connector *attached_connector; diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 02f99d768d49..3fd082933c87 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) /* bandgap reset is needed after everytime we do power gate */ band_gap_reset(dev_priv); + I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); + usleep_range(2500, 3000); + val = I915_READ(MIPI_PORT_CTRL(pipe)); I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); usleep_range(1000, 1500); - I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); - usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); - usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); - usleep_range(2000, 2500); + + I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); + usleep_range(2500, 3000); + I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); - usleep_range(2000, 2500); + usleep_range(2500, 3000); } static void intel_dsi_enable(struct intel_encoder *encoder) @@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); - I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); + I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); + I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); + I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); - val = I915_READ(MIPI_PORT_CTRL(pipe)); - I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD); - usleep_range(1000, 1500); - if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) == 0x00000), 
30)) DRM_ERROR("DSI LP not going Low\n"); + val = I915_READ(MIPI_PORT_CTRL(pipe)); + I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD); + usleep_range(1000, 1500); + I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); usleep_range(2000, 2500); diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c index 3eeb21b9fddf..933c86305237 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.c +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c @@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs) else cmd |= DPI_LP_MODE; - /* DPI virtual channel?! */ - - mask = DPI_FIFO_EMPTY; - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50)) - DRM_ERROR("Timeout waiting for DPI FIFO empty.\n"); - /* clear bit */ I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 23126023aeba..5e5a72fca5fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= flags; + /* gen2/3 store dither state in pfit control, needs to match */ + if (INTEL_INFO(dev)->gen < 4) { + tmp = I915_READ(PFIT_CONTROL); + + pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; + } + dotclock = pipe_config->port_clock; if (HAS_PCH_SPLIT(dev_priv->dev)) diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 2e2c71fcc9ed..4f6b53998d79 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); + /* + * If the acpi_video interface is not supposed to be used, don't + * bother processing backlight level change requests from firmware. + */ + if (!acpi_video_verify_backlight_support()) { + DRM_DEBUG_KMS("opregion backlight request ignored\n"); + return 0; + } + if (!(bclp & ASLE_BCLP_VALID)) return ASLC_BACKLIGHT_FAILED; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 38a98570d10c..12b02fe1d0ae 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); - /* Make sure pre-965 set dither correctly for 18bpp panels. */ - if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; - out: if ((pfit_control & PFIT_ENABLE) == 0) { pfit_control = 0; pfit_pgm_ratios = 0; } + /* Make sure pre-965 set dither correctly for 18bpp panels. 
*/ + if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + pipe_config->gmch_pfit.control = pfit_control; pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; pipe_config->gmch_pfit.lvds_border_bits = border; @@ -1118,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector) int ret; if (!dev_priv->vbt.backlight.present) { - DRM_DEBUG_KMS("native backlight control not available per VBT\n"); - return 0; + if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { + DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n"); + } else { + DRM_DEBUG_KMS("no backlight present per VBT\n"); + return 0; + } } /* set level and max in panel struct */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9ad0c6afc487..ee72807069e4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3209,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val) */ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + + /* Latest VLV doesn't need to force the gfx clock */ + if (dev->pdev->revision >= 0xd) { + valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + return; + } + /* * When we are idle. Drop to min voltage state. */ @@ -6038,6 +6046,27 @@ int i915_release_power_well(void) } EXPORT_SYMBOL_GPL(i915_release_power_well); +/* + * Private interface for the audio driver to get CDCLK in kHz. + * + * Caller must request power well using i915_request_power_well() prior to + * making the call. + */ +int i915_get_cdclk_freq(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + return intel_ddi_get_cdclk_freq(dev_priv); +} +EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); + + #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 1b66ddcdfb33..9a17b4e92ef4 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -691,6 +691,14 @@ intel_post_enable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( + */ + if (IS_BROADWELL(dev)) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + /* * FIXME IPS should be fine as long as one plane is * enabled, but in practice it seems to have problems * when going from primary only to sprite only and vice diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 26e962b7e702..2283c442a10d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) } switch ((ctrl & 0x000f0000) >> 16) { - case 6: datarate = pclk * 30 / 8; break; - case 5: datarate = pclk * 24 / 8; break; + case 6: datarate = pclk * 30; break; + case 5: datarate = pclk * 24; break; case 2: default: - datarate = pclk * 18 / 8; + datarate = pclk * 18; break; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 48aa38a87e3f..fa30d8196f35 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ 
b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head) if (outp->info.type == DCB_OUTPUT_DP) { u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300)); switch ((sync & 0x000003c0) >> 6) { - case 6: pclk = pclk * 30 / 8; break; - case 5: pclk = pclk * 24 / 8; break; + case 6: pclk = pclk * 30; break; + case 5: pclk = pclk * 24; break; case 2: default: - pclk = pclk * 18 / 8; + pclk = pclk * 18; break; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c index 52c299c3d300..eb2d7789555d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c @@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait) struct nvkm_output_dp *outp = (void *)base; bool retrain = true; u8 link[2], stat[3]; - u32 rate; + u32 linkrate; int ret, i; /* check that the link is trained at a high enough rate */ @@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait) goto done; } - rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET); - if (rate < ((datarate / 8) * 10)) { + linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET); + linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */ + datarate = (datarate + 9) / 10; /* -> decakilobits */ + if (linkrate < datarate) { DBG("link not trained at sufficient rate\n"); goto done; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index e1832778e8b6..7a1ebdfa9e1b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c @@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) struct nvkm_output_dp *outpdp = (void *)outp; switch (data) { case NV94_DISP_SOR_DP_PWR_STATE_OFF: + nouveau_event_put(outpdp->irq); ((struct nvkm_output_dp_impl *)nv_oclass(outp)) ->lnk_pwr(outpdp, 0); atomic_set(&outpdp->lt.done, 0); diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h index 0f57fcfe0bbf..2af9cfd2c60f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h @@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2) }; } -static inline struct ramfuc_reg +static noinline struct ramfuc_reg ramfuc_reg(u32 addr) { return ramfuc_reg2(addr, addr); @@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec) #define ram_init(s,p) ramfuc_init(&(s)->base, (p)) #define ram_exec(s,e) ramfuc_exec(&(s)->base, (e)) -#define ram_have(s,r) ((s)->r_##r.addr != 0x000000) +#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000) #define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r) #define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d)) #define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r) diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c index 1ad3ea503133..c5b46e302319 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c @@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc) /* (re)program mempll, if required */ if (ram->mode == 2) { ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000); + ram_mask(fuc, 0x132000, 0x80000000, 0x80000000); ram_mask(fuc, 0x132000, 0x00000001, 0x00000000); ram_mask(fuc, 
0x132004, 0x103fffff, mcoef); ram_mask(fuc, 0x132000, 0x00000001, 0x00000001); diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index cfde9eb44ad0..6212537b90c5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm) nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, NOUVEAU_THERM_THRS_SHUTDOWN); + spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); + /* schedule the next poll in one second */ if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) - ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); - - spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); + ptimer->alarm(ptimer, 1000000000ULL, alarm); } void diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index ddd83756b9a2..5425ffe3931d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev) ret = nouveau_do_resume(drm_dev); if (ret) return ret; - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 0); - nouveau_fbcon_zfill_all(drm_dev); - if (drm_dev->mode_config.num_crtc) + if (drm_dev->mode_config.num_crtc) { nouveau_display_resume(drm_dev); + nouveau_fbcon_set_suspend(drm_dev, 0); + } + return 0; } @@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev) ret = nouveau_do_resume(drm_dev); if (ret) return ret; - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 0); - nouveau_fbcon_zfill_all(drm_dev); - if (drm_dev->mode_config.num_crtc) + + if (drm_dev->mode_config.num_crtc) { nouveau_display_resume(drm_dev); + nouveau_fbcon_set_suspend(drm_dev, 0); + } + return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 64a42cfd3717..191665ee7f52 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) if (state == 1) nouveau_fbcon_save_disable_accel(dev); fb_set_suspend(drm->fbcon->helper.fbdev, state); - if (state == 0) + if (state == 0) { nouveau_fbcon_restore_accel(dev); + nouveau_fbcon_zfill(dev, drm->fbcon); + } console_unlock(); } } - -void -nouveau_fbcon_zfill_all(struct drm_device *dev) -{ - struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - nouveau_fbcon_zfill(dev, drm->fbcon); - } -} diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index fdfc0c94fbcc..fcff797d2084 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info); int nouveau_fbcon_init(struct drm_device *dev); void nouveau_fbcon_fini(struct drm_device *dev); void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); -void nouveau_fbcon_zfill_all(struct drm_device *dev); void nouveau_fbcon_save_disable_accel(struct drm_device *dev); void nouveau_fbcon_restore_accel(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index afdf607df3e6..4c534b7b04da 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) 
} } - mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; + mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3; + mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; mthd |= nv_encoder->or; if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 34d6a85e9023..0bf1e20c6e44 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg) pending = xchg(&qdev->ram_header->int_pending, 0); + if (!pending) + return IRQ_NONE; + atomic_inc(&qdev->irq_received); if (pending & QXL_INTERRUPT_DISPLAY) { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a03c73411a56..30d242b25078 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); @@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index c5b1f2da3954..b1e11f8434e2 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, /* flags not zero */ if (args.v1.ucReplyStatus == 2) { DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); - r = -EBUSY; + r = -EIO; goto done; } @@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; + + char dpcd_hex_dump[DP_DPCD_SIZE * 3]; ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, DP_DPCD_SIZE); if (ret > 0) { memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: "); - for (i = 0; i < DP_DPCD_SIZE; i++) - DRM_DEBUG_KMS("%02x ", msg[i]); - DRM_DEBUG_KMS("\n"); + + hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), + 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); + DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); radeon_dp_probe_oui(radeon_connector); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2b2908440644..7d68203a3737 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct backlight_properties props; struct 
radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; - u8 backlight_level; char bl_name[16]; /* Mac laptops with multiple GPUs use the gmux driver for backlight @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, pdata->encoder = radeon_encoder; - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); - dig = radeon_encoder->enc_priv; dig->bl_dev = bd; bd->props.brightness = radeon_atom_backlight_get_brightness(bd); + /* Set a reasonable default here if the level is 0 otherwise + * fbdev will attempt to turn the backlight on after console + * unblanking and it will try and restore 0 which turns the backlight + * off again. + */ + if (bd->props.brightness == 0) + bd->props.brightness = RADEON_MAX_BL_LEVEL; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 10dae4106c08..584090ac3eb9 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev) tmp &= ~GLOBAL_PWRMGT_EN; WREG32_SMC(GENERAL_PWRMGT, tmp); - tmp = RREG32(SCLK_PWRMGT_CNTL); + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); tmp &= ~DYNAMIC_PM_EN; WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index dcd4518a9b08..c0ea66192fe0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 8) { @@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -7676,14 +7678,16 @@ restart_ih: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); cik_vm_decode_fault(rdev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 167: /* VCE */ DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index ae88660f34ea..0c6e1b55d968 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -1752,12 +1752,12 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) #define EOP_CACHE_POLICY(x) ((x) << 25) /* 0 - LRU * 1 - Stream * 2 - Bypass */ -#define EOP_TCL2_VOLATILE (1 << 27) #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 5a9a5f4d7888..47d31e915758 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -1551,7 +1551,7 @@ int 
cypress_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } return 0; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e2f605224e8c..15e4f28015e1 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002, 0x913c, 0x0000000f, 0x0000000a }; @@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002 }; @@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] = static const u32 supersumo_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] = static const u32 wrestler_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -5066,14 +5068,16 @@ restart_ih: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); cayman_vm_decode_fault(rdev, status, addr); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 333d143fca2c..23bff590fb6e 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -239,7 +239,6 @@ # define EVERGREEN_CRTC_V_BLANK (1 << 0) #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 #define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 -#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 #define 
EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 3f6e817d97ee..9ef8c38f2d66 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = false; + pi->bapm_enable = true; pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? */ diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 004c931606c4..01fc4888e6fe 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c66952d4b00c..3c69f58e46ef 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4b0bbf88d5c0..60c47f829122 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -102,6 +102,7 @@ extern int radeon_runtime_pm; extern int radeon_hard_reset; extern int radeon_vm_size; extern int radeon_vm_block_size; +extern int radeon_deep_color; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -448,6 +449,7 @@ struct radeon_bo_va { /* protected by vm mutex */ struct list_head vm_list; + struct list_head vm_status; /* constant after initialization */ struct radeon_vm *vm; @@ -683,10 +685,9 @@ struct radeon_flip_work { struct work_struct unpin_work; struct radeon_device *rdev; int crtc_id; - struct drm_framebuffer *fb; + uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; - struct radeon_bo *new_rbo; struct radeon_fence *fence; }; @@ -749,10 +750,6 @@ union radeon_irq_stat_regs { struct cik_irq_stat_regs cik; }; -#define RADEON_MAX_HPD_PINS 7 -#define RADEON_MAX_CRTCS 6 -#define RADEON_MAX_AFMT_BLOCKS 7 - struct radeon_irq { bool installed; spinlock_t lock; @@ -871,6 +868,9 @@ struct radeon_vm { struct list_head va; unsigned id; + /* BOs freed, but not yet updated in the PT */ + struct list_head freed; + /* contains the page directory */ struct radeon_bo *page_directory; uint64_t pd_gpu_addr; @@ -879,6 +879,8 @@ struct radeon_vm { /* array of page tables, one for each page directory entry */ struct radeon_vm_pt *page_tables; + struct radeon_bo_va *ib_bo_va; + struct mutex mutex; /* last fence for cs using this vm */ struct radeon_fence *fence; @@ -2836,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev, uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); int radeon_vm_update_page_directory(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_clear_freed(struct radeon_device *rdev, + struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct 
radeon_bo *bo, + struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); @@ -2851,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, struct radeon_bo_va *bo_va, uint64_t offset, uint32_t flags); -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va); +void radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_bo_va *bo_va); /* audio */ void r600_audio_update_hdmi(struct work_struct *work); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 30844814c25a..173f378428a9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) rdev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); if (rdev->clock.default_dispclk == 0) { - if (ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE6(rdev)) + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + else if (ASIC_IS_DCE5(rdev)) rdev->clock.default_dispclk = 54000; /* 540 Mhz */ else rdev->clock.default_dispclk = 60000; /* 600 Mhz */ } + /* set a reasonable default for DP */ + if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + rdev->clock.default_dispclk / 100); + rdev->clock.default_dispclk = 60000; + } rdev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); rdev->clock.current_dispclk = rdev->clock.default_dispclk; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 1b9177ed181f..44831197e82e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) } } + if ((radeon_deep_color == 0) && (bpc > 8)) + bpc = 8; + DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", connector->name, connector->display_info.bpc, bpc); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 71a143461478..ae763f60c8a0 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, struct radeon_vm *vm) { struct radeon_device *rdev = p->rdev; + struct radeon_bo_va *bo_va; int i, r; r = radeon_vm_update_page_directory(rdev, vm); if (r) return r; - r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, + r = radeon_vm_clear_freed(rdev, vm); + if (r) + return r; + + if (vm->ib_bo_va == NULL) { + DRM_ERROR("Tmp BO not in VM!\n"); + return -EINVAL; + } + + r = radeon_vm_bo_update(rdev, vm->ib_bo_va, &rdev->ring_tmp_bo.bo->tbo.mem); if (r) return r; @@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, continue; bo = p->relocs[i].robj; - r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); + bo_va = radeon_vm_bo_find(vm, bo); + if (bo_va == NULL) { + dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); + return -EINVAL; + } + + r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); if (r) return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 03686fab842d..697add2cd4e3 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev) if 
(!radeon_check_pot_argument(radeon_vm_size)) { dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } - if (radeon_vm_size < 4) { - dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", + if (radeon_vm_size < 1) { + dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } /* * Max GPUVM size for Cayman, SI and CI are 40 bits. */ - if (radeon_vm_size > 1024*1024) { - dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", + if (radeon_vm_size > 1024) { + dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the * page table and the remaining bits are in the page directory */ if (radeon_vm_block_size < 9) { - dev_warn(rdev->dev, "VM page table size (%d) to small\n", + dev_warn(rdev->dev, "VM page table size (%d) too small\n", radeon_vm_block_size); radeon_vm_block_size = 9; } if (radeon_vm_block_size > 24 || - radeon_vm_size < (1ull << radeon_vm_block_size)) { - dev_warn(rdev->dev, "VM page table size (%d) to large\n", + (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) { + dev_warn(rdev->dev, "VM page table size (%d) too large\n", radeon_vm_block_size); radeon_vm_block_size = 9; } @@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev, /* Adjust VM size here. * Max GPUVM size for cayman+ is 40 bits. */ - rdev->vm_manager.max_pfn = radeon_vm_size << 8; + rdev->vm_manager.max_pfn = radeon_vm_size << 18; /* Set asic functions */ r = radeon_asic_init(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8fc362aa6a1a..bf25061c8ac4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -285,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work) void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; - struct radeon_flip_work *work; unsigned long flags; u32 update_pending; int vpos, hpos; @@ -295,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) return; spin_lock_irqsave(&rdev->ddev->event_lock, flags); - work = radeon_crtc->flip_work; - if (work == NULL) { + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } @@ -344,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->flip_work; - if (work == NULL) { + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } /* Pageflip completed. Clean up. 
*/ + radeon_crtc->flip_status = RADEON_FLIP_NONE; radeon_crtc->flip_work = NULL; /* wakeup userspace */ @@ -359,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); - radeon_fence_unref(&work->fence); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); queue_work(radeon_crtc->flip_queue, &work->unpin_work); } @@ -379,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; - struct drm_framebuffer *fb = work->fb; - - uint32_t tiling_flags, pitch_pixels; - uint64_t base; - unsigned long flags; int r; down_read(&rdev->exclusive_lock); - while (work->fence) { + if (work->fence) { r = radeon_fence_wait(work->fence, false); if (r == -EDEADLK) { up_read(&rdev->exclusive_lock); r = radeon_gpu_reset(rdev); down_read(&rdev->exclusive_lock); } + if (r) + DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); - if (r) { - DRM_ERROR("failed to wait on page flip fence (%d)!\n", - r); - goto cleanup; - } else - radeon_fence_unref(&work->fence); + /* We continue with the page flip even if we failed to wait on + * the fence, otherwise the DRM core and userspace will be + * confused about which BO the CRTC is scanning out + */ + + radeon_fence_unref(&work->fence); } + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + /* set the proper interrupt */ + radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + + /* do the flip (mmio) */ + radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); + + radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + up_read(&rdev->exclusive_lock); +} + +static int radeon_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_framebuffer *old_radeon_fb; + struct radeon_framebuffer *new_radeon_fb; + struct drm_gem_object *obj; + struct radeon_flip_work *work; + struct radeon_bo *new_rbo; + uint32_t tiling_flags, pitch_pixels; + uint64_t base; + unsigned long flags; + int r; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + INIT_WORK(&work->flip_work, radeon_flip_work_func); + INIT_WORK(&work->unpin_work, radeon_unpin_work_func); + + work->rdev = rdev; + work->crtc_id = radeon_crtc->crtc_id; + work->event = event; + + /* schedule unpin of the old buffer */ + old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); + obj = old_radeon_fb->obj; + + /* take a reference to the old object */ + drm_gem_object_reference(obj); + work->old_rbo = gem_to_radeon_bo(obj); + + new_radeon_fb = to_radeon_framebuffer(fb); + obj = new_radeon_fb->obj; + new_rbo = gem_to_radeon_bo(obj); + + spin_lock(&new_rbo->tbo.bdev->fence_lock); + if (new_rbo->tbo.sync_obj) + work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); + spin_unlock(&new_rbo->tbo.bdev->fence_lock); + /* pin the new buffer */ - DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", - work->old_rbo, work->new_rbo); + DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n", + work->old_rbo, new_rbo); - r = radeon_bo_reserve(work->new_rbo, false); + r 
= radeon_bo_reserve(new_rbo, false); if (unlikely(r != 0)) { DRM_ERROR("failed to reserve new rbo buffer before flip\n"); goto cleanup; } /* Only 27 bit offset for legacy CRTC */ - r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); if (unlikely(r != 0)) { - radeon_bo_unreserve(work->new_rbo); + radeon_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); - radeon_bo_unreserve(work->new_rbo); + radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); + radeon_bo_unreserve(new_rbo); if (!ASIC_IS_AVIVO(rdev)) { /* crtc offset is from display base addr not FB location */ @@ -460,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work) } base &= ~7; } + work->base = base; r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); if (r) { @@ -470,98 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - /* set the proper interrupt */ - radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + r = -EBUSY; + goto vblank_cleanup; + } + radeon_crtc->flip_status = RADEON_FLIP_PENDING; + radeon_crtc->flip_work = work; - /* do the flip (mmio) */ - radeon_page_flip(rdev, radeon_crtc->crtc_id, base); + /* update crtc fb */ + crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - up_read(&rdev->exclusive_lock); - return; + queue_work(radeon_crtc->flip_queue, &work->flip_work); + return 0; + +vblank_cleanup: + drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); pflip_cleanup: - if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { + if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } - if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { + if (unlikely(radeon_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } - radeon_bo_unreserve(work->new_rbo); + radeon_bo_unreserve(new_rbo); cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); radeon_fence_unref(&work->fence); kfree(work); - up_read(&rdev->exclusive_lock); -} - -static int radeon_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct radeon_framebuffer *old_radeon_fb; - struct radeon_framebuffer *new_radeon_fb; - struct drm_gem_object *obj; - struct radeon_flip_work *work; - unsigned long flags; - - work = kzalloc(sizeof *work, GFP_KERNEL); - if (work == NULL) - return -ENOMEM; - - INIT_WORK(&work->flip_work, radeon_flip_work_func); - INIT_WORK(&work->unpin_work, radeon_unpin_work_func); - - work->rdev = rdev; - work->crtc_id = radeon_crtc->crtc_id; - work->fb = fb; - work->event = event; - /* schedule unpin of the old buffer */ - old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); - obj = old_radeon_fb->obj; - - /* take a reference to the old object */ - drm_gem_object_reference(obj); - work->old_rbo = 
gem_to_radeon_bo(obj); - - new_radeon_fb = to_radeon_framebuffer(fb); - obj = new_radeon_fb->obj; - work->new_rbo = gem_to_radeon_bo(obj); - - spin_lock(&work->new_rbo->tbo.bdev->fence_lock); - if (work->new_rbo->tbo.sync_obj) - work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj); - spin_unlock(&work->new_rbo->tbo.bdev->fence_lock); - - /* We borrow the event spin lock for protecting flip_work */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - if (radeon_crtc->flip_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - radeon_fence_unref(&work->fence); - kfree(work); - return -EBUSY; - } - radeon_crtc->flip_work = work; - - /* update crtc fb */ - crtc->primary->fb = fb; - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - queue_work(radeon_crtc->flip_queue, &work->flip_work); - - return 0; + return r; } static int @@ -821,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) struct radeon_device *rdev = dev->dev_private; int ret = 0; + /* don't leak the edid if we already fetched it in detect() */ + if (radeon_connector->edid) + goto got_edid; + /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); @@ -859,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } if (radeon_connector->edid) { +got_edid: drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6e3017413386..e9e361084249 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -173,8 +173,9 @@ int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; int radeon_hard_reset = 0; -int radeon_vm_size = 4096; +int radeon_vm_size = 4; int radeon_vm_block_size = 9; +int radeon_deep_color = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -242,12 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444); MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); module_param_named(hard_reset, radeon_hard_reset, int, 0444); -MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); +MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); module_param_named(vm_size, radeon_vm_size, int, 0444); MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); module_param_named(vm_block_size, radeon_vm_block_size, int, 0444); +MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); +module_param_named(deep_color, radeon_deep_color, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 35d931881b4b..d25ae6acfd5a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family 
>= CHIP_CAYMAN) { struct radeon_fpriv *fpriv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm; int r; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); @@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - r = radeon_vm_init(rdev, &fpriv->vm); + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); if (r) { kfree(fpriv); return r; @@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (rdev->accel_working) { r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } /* map the ib pool buffer read only into * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, + RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); if (r) { - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } @@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev, /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { struct radeon_fpriv *fpriv = file_priv->driver_priv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm = &fpriv->vm; int r; if (rdev->accel_working) { r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); + if (vm->ib_bo_va) + radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } } - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ad0e4b8cc7e3..0592ddb0904b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -46,6 +46,10 @@ struct radeon_device; #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) +#define RADEON_MAX_HPD_PINS 7 +#define RADEON_MAX_CRTCS 6 +#define RADEON_MAX_AFMT_BLOCKS 7 + enum radeon_rmx_type { RMX_OFF, RMX_FULL, @@ -233,8 +237,8 @@ struct radeon_mode_info { struct card_info *atom_card_info; enum radeon_connector_table connector_table; bool mode_config_initialized; - struct radeon_crtc *crtcs[6]; - struct radeon_afmt *afmt[7]; + struct radeon_crtc *crtcs[RADEON_MAX_CRTCS]; + struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ @@ -302,6 +306,12 @@ struct radeon_atom_ss { uint16_t amount; }; +enum radeon_flip_status { + RADEON_FLIP_NONE, + RADEON_FLIP_PENDING, + RADEON_FLIP_SUBMITTED +}; + struct radeon_crtc { struct drm_crtc base; int crtc_id; @@ -327,6 +337,7 @@ struct radeon_crtc { /* page flipping */ struct workqueue_struct *flip_queue; struct radeon_flip_work *flip_work; + enum radeon_flip_status flip_status; /* pll sharing */ struct radeon_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 12c663e86ca1..e447e390d09a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ 
b/drivers/gpu/drm/radeon/radeon_pm.c @@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) rdev->pm.dpm.ac_power = true; else rdev->pm.dpm.ac_power = false; - if (rdev->asic->dpm.enable_bapm) - radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + if (rdev->family == CHIP_ARUBA) { + if (rdev->asic->dpm.enable_bapm) + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + } mutex_unlock(&rdev->pm.mutex); } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 899d9126cad6..725d3669014f 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->vm_list); + INIT_LIST_HEAD(&bo_va->vm_status); mutex_lock(&vm->mutex); list_add(&bo_va->vm_list, &vm->va); @@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, head = &tmp->vm_list; } + if (bo_va->soffset) { + /* add a clone of the bo_va to clear the old address */ + tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); + if (!tmp) { + mutex_unlock(&vm->mutex); + return -ENOMEM; + } + tmp->soffset = bo_va->soffset; + tmp->eoffset = bo_va->eoffset; + tmp->vm = vm; + list_add(&tmp->vm_status, &vm->freed); + } + bo_va->soffset = soffset; bo_va->eoffset = eoffset; bo_va->flags = flags; @@ -495,7 +509,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, mutex_unlock(&vm->mutex); r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, - RADEON_GPU_PAGE_SIZE, false, + RADEON_GPU_PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &pt); if (r) return r; @@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, * Object have to be reserved and mutex must be locked! */ int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, + struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem) { + struct radeon_vm *vm = bo_va->vm; struct radeon_ib ib; - struct radeon_bo_va *bo_va; unsigned nptes, ndw; uint64_t addr; int r; - bo_va = radeon_vm_bo_find(vm, bo); - if (bo_va == NULL) { - dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); - return -EINVAL; - } if (!bo_va->soffset) { dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", - bo, vm); + bo_va->bo, vm); return -EINVAL; } @@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, trace_radeon_vm_bo_update(bo_va); - nptes = radeon_bo_ngpu_pages(bo); + nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE; /* padding, etc. */ ndw = 64; @@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev, } /** + * radeon_vm_clear_freed - clear freed BOs in the PT + * + * @rdev: radeon_device pointer + * @vm: requested vm + * + * Make sure all freed BOs are cleared in the PT. + * Returns 0 for success. + * + * PTs have to be reserved and mutex must be locked! + */ +int radeon_vm_clear_freed(struct radeon_device *rdev, + struct radeon_vm *vm) +{ + struct radeon_bo_va *bo_va, *tmp; + int r; + + list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { + list_del(&bo_va->vm_status); + r = radeon_vm_bo_update(rdev, bo_va, NULL); + kfree(bo_va); + if (r) + return r; + } + return 0; + +} + +/** * radeon_vm_bo_rmv - remove a bo to a specific vm * * @rdev: radeon_device pointer * @bo_va: requested bo_va * * Remove @bo_va->bo from the requested vm (cayman+). 
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and - * remove the ptes for @bo_va in the page table. - * Returns 0 for success. * * Object have to be reserved! */ -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va) +void radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_bo_va *bo_va) { - int r = 0; + struct radeon_vm *vm = bo_va->vm; - mutex_lock(&bo_va->vm->mutex); - if (bo_va->soffset) - r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); + list_del(&bo_va->bo_list); + mutex_lock(&vm->mutex); list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - return r; + if (bo_va->soffset) { + bo_va->bo = NULL; + list_add(&bo_va->vm_status, &vm->freed); + } else { + kfree(bo_va); + } + + mutex_unlock(&vm->mutex); } /** @@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) int r; vm->id = 0; + vm->ib_bo_va = NULL; vm->fence = NULL; vm->last_flush = NULL; vm->last_id_use = NULL; mutex_init(&vm->mutex); INIT_LIST_HEAD(&vm->va); + INIT_LIST_HEAD(&vm->freed); pd_size = radeon_vm_directory_size(rdev); pd_entries = radeon_vm_num_pdes(rdev); @@ -992,7 +1030,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) return -ENOMEM; } - r = radeon_bo_create(rdev, pd_size, align, false, + r = radeon_bo_create(rdev, pd_size, align, true, RADEON_GEM_DOMAIN_VRAM, NULL, &vm->page_directory); if (r) @@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) kfree(bo_va); } } - + list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) + kfree(bo_va); for (i = 0; i < radeon_vm_num_pdes(rdev); i++) radeon_bo_unref(&vm->page_tables[i].bo); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 237dd29d9f1c..3e21e869015f 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index da041a43d82e..3c76e1dcdf04 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); - /* disable ss, causes hangs on some cayman boards */ - if (rdev->family == CHIP_CAYMAN) { - pi->sclk_ss = false; - pi->mclk_ss = false; - } - if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 730cee2c34cf..9e854fd016da 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -6376,14 +6377,16 @@ restart_ih: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status 
*/ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); si_vm_decode_fault(rdev, status, addr); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* RINGID0 CP_INT */ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2a2822c03329..32e50be9c4ac 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1874,7 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - pi->enable_bapm = false; + /* There are stability issues reported on with + * bapm enabled when switching between AC and battery + * power. At the same time, some MSI boards hang + * if it's not enabled and dpm is enabled. Just enable + * it for MSI boards right now. + */ + if (rdev->pdev->subsystem_vendor == 0x1462) + pi->enable_bapm = true; + else + pi->enable_bapm = false; pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index a89ad938eacf..b031b48dbb3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info) vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); - vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); } diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 800c8b60f7a2..5e79c6ad914f 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -810,7 +810,7 @@ config HID_ZYDACRON config HID_SENSOR_HUB tristate "HID Sensors framework support" - depends on HID + depends on HID && HAS_IOMEM select MFD_CORE default n ---help--- diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6d00bb9366fa..48b66bbffc94 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -323,6 +323,7 @@ #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 +#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968 #define USB_VENDOR_ID_EZKEY 0x0518 #define USB_DEVICE_ID_BTC_8193 0x0002 @@ -715,6 +716,8 @@ #define USB_VENDOR_ID_PENMOUNT 0x14e1 #define USB_DEVICE_ID_PENMOUNT_PCI 0x3500 +#define USB_DEVICE_ID_PENMOUNT_1610 0x1610 +#define USB_DEVICE_ID_PENMOUNT_1640 0x1640 #define USB_VENDOR_ID_PETALYNX 0x18b1 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 2451c7e5febd..578bbe65902b 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev, return 0; } +#ifdef CONFIG_PM static int rmi_post_reset(struct hid_device *hdev) { return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); @@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev) { return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); } +#endif /* CONFIG_PM */ #define RMI4_MAX_PAGE 0xff #define RMI4_PAGE_SIZE 0x0100 diff --git a/drivers/hid/hid-sensor-hub.c 
b/drivers/hid/hid-sensor-hub.c index a8d5c8faf8cf..e244e449cbba 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev, { struct hid_sensor_hub_callbacks_list *callback; struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); + unsigned long flags; - spin_lock(&pdata->dyn_callback_lock); + spin_lock_irqsave(&pdata->dyn_callback_lock, flags); list_for_each_entry(callback, &pdata->dyn_callback_list, list) if (callback->usage_id == usage_id && callback->hsdev == hsdev) { - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return -EINVAL; } callback = kzalloc(sizeof(*callback), GFP_ATOMIC); if (!callback) { - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return -ENOMEM; } callback->hsdev = hsdev; @@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev, callback->usage_id = usage_id; callback->priv = NULL; list_add_tail(&callback->list, &pdata->dyn_callback_list); - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return 0; } @@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, { struct hid_sensor_hub_callbacks_list *callback; struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); + unsigned long flags; - spin_lock(&pdata->dyn_callback_lock); + spin_lock_irqsave(&pdata->dyn_callback_lock, flags); list_for_each_entry(callback, &pdata->dyn_callback_list, list) if (callback->usage_id == usage_id && callback->hsdev == hsdev) { @@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, kfree(callback); break; } - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return 0; } @@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message) { struct sensor_hub_data *pdata = hid_get_drvdata(hdev); struct hid_sensor_hub_callbacks_list *callback; + unsigned long flags; hid_dbg(hdev, " sensor_hub_suspend\n"); - spin_lock(&pdata->dyn_callback_lock); + spin_lock_irqsave(&pdata->dyn_callback_lock, flags); list_for_each_entry(callback, &pdata->dyn_callback_list, list) { if (callback->usage_callback->suspend) callback->usage_callback->suspend( callback->hsdev, callback->priv); } - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return 0; } @@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev) { struct sensor_hub_data *pdata = hid_get_drvdata(hdev); struct hid_sensor_hub_callbacks_list *callback; + unsigned long flags; hid_dbg(hdev, " sensor_hub_resume\n"); - spin_lock(&pdata->dyn_callback_lock); + spin_lock_irqsave(&pdata->dyn_callback_lock, flags); list_for_each_entry(callback, &pdata->dyn_callback_list, list) { if (callback->usage_callback->resume) callback->usage_callback->resume( callback->hsdev, callback->priv); } - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return 0; } @@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev, if (name == NULL) { hid_err(hdev, "Failed MFD device name\n"); ret = -ENOMEM; + kfree(hsdev); goto err_no_mem; } sd->hid_sensor_hub_client_devs[ diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 59badc10a08c..31e6727cd009 100644 --- 
a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -49,6 +49,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, @@ -76,6 +77,8 @@ static const struct hid_blacklist { { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index e84f4526eb36..ae22e3c1fc4c 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -339,9 +339,13 @@ static void process_chn_event(u32 relid) */ do { - hv_begin_read(&channel->inbound); + if (read_state) + hv_begin_read(&channel->inbound); channel->onchannel_callback(arg); - bytes_to_read = hv_end_read(&channel->inbound); + if (read_state) + bytes_to_read = hv_end_read(&channel->inbound); + else + bytes_to_read = 0; } while (read_state && (bytes_to_read != 0)); } else { pr_err("no channel callback for relid - %u\n", relid); diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index eaaa3d843b80..23b2ce294c4c 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context) /* * Send the information to the user-level daemon. 
*/ - fcopy_send_data(); schedule_delayed_work(&fcopy_work, 5*HZ); + fcopy_send_data(); return; } icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index ea852537307e..521c14625b3a 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy) kvp_respond_to_host(NULL, HV_E_FAIL); } +static void poll_channel(struct vmbus_channel *channel) +{ + if (channel->target_cpu != smp_processor_id()) + smp_call_function_single(channel->target_cpu, + hv_kvp_onchannelcallback, + channel, true); + else + hv_kvp_onchannelcallback(channel); +} + + static int kvp_handle_handshake(struct hv_kvp_msg *msg) { int ret = 1; @@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg) kvp_register(dm_reg_value); kvp_transaction.active = false; if (kvp_transaction.kvp_context) - hv_kvp_onchannelcallback(kvp_transaction.kvp_context); + poll_channel(kvp_transaction.kvp_context); } return ret; } @@ -568,7 +579,7 @@ response_done: vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, VM_PKT_DATA_INBAND, 0); - + poll_channel(channel); } /* @@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context) return; } - vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, + vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen, &requestid); if (recvlen > 0) { diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index dd761806f0e8..3b9c9ef0deb8 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev, (struct hv_util_service *)dev_id->driver_data; int ret; - srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); + srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); if (!srv->recv_buffer) return -ENOMEM; if (srv->util_init) { diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 5ffd81f19d01..0625e50d7a6e 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev, return sprintf(buf, "%u\n", !!(alarms & mask)); } -static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 0, 0); +static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, + adc128_show_in, NULL, 0, 0); static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 0, 1); static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 0, 2); -static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 1, 0); +static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, + adc128_show_in, NULL, 1, 0); static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 1, 1); static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 1, 2); -static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 2, 0); +static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, + adc128_show_in, NULL, 2, 0); static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 2, 1); static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 2, 2); -static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 3, 0); +static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, + adc128_show_in, NULL, 3, 0); static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 3, 1); static 
SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 3, 2); -static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 4, 0); +static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, + adc128_show_in, NULL, 4, 0); static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 4, 1); static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 4, 2); -static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 5, 0); +static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, + adc128_show_in, NULL, 5, 0); static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 5, 1); static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 5, 2); -static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO, - adc128_show_in, adc128_set_in, 6, 0); +static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, + adc128_show_in, NULL, 6, 0); static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, adc128_show_in, adc128_set_in, 6, 1); static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 3eb4281689b5..d74241bb278c 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, struct adm1021_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_max[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_max[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), - data->temp_max[index]); + reg_val); mutex_unlock(&data->update_lock); return count; @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, struct adm1021_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_min[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_min[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), - data->temp_min[index]); + reg_val); mutex_unlock(&data->update_lock); return count; diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index 78339e880bd6..2804571b269e 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, /* Update the value */ reg = (reg & 0x3F) | (val << 6); + /* Update the cache */ + data->fan_div[attr->index] = reg; + /* Write value */ i2c_smbus_write_byte_data(client, ADM1029_REG_FAN_DIV[attr->index], reg); diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index a8a540ca8c34..51c1a5a165ab 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), @@ -394,6 +395,7 @@ 
set_auto_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f4dea5ccf17..9ee3913850d6 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_min[attr->index] = temp; @@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_max[attr->index] = temp; @@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->pwm_tmin[attr->index] = temp; diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index eea817296513..9f2be3dd28f3 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp, NULL, IDX_TEMP2_INPUT); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MIN); diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index afd31042b452..d14ab3c45daa 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { - return sprintf(buf, "da9052-hwmon\n"); + return sprintf(buf, "da9052\n"); } static ssize_t show_label(struct device *dev, diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c index 73b3865f1207..35eb7738d711 100644 --- a/drivers/hwmon/da9055-hwmon.c +++ b/drivers/hwmon/da9055-hwmon.c @@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { - return 
sprintf(buf, "da9055-hwmon\n"); + return sprintf(buf, "da9055\n"); } static ssize_t show_label(struct device *dev, diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index fd892dd48e4c..78002de46cb6 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_min[nr] = val; @@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_max[nr] = val; @@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, { struct emc2103_data *data = emc2103_update_device(dev); struct i2c_client *client = to_i2c_client(dev); - long rpm_target; + unsigned long rpm_target; - int result = kstrtol(buf, 10, &rpm_target); + int result = kstrtoul(buf, 10, &rpm_target); if (result < 0) return result; /* Datasheet states 16384 as maximum RPM target (table 3.2) */ - if ((rpm_target < 0) || (rpm_target > 16384)) - return -EINVAL; + rpm_target = clamp_val(rpm_target, 0, 16384); mutex_lock(&data->update_lock); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index bdfbe9114889..ae66f42c4d6d 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -512,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n", - pdev->name); + pdev_id->name); return 0; err_after_sysfs: diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index efee4c59239f..34b9a601ad07 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n) */ static inline s8 TEMP_TO_REG(int val) { - return clamp_val(SCALE(val, 1, 1000), -128000, 127000); + return SCALE(clamp_val(val, -128000, 127000), 1, 1000); } static inline int TEMP_FROM_REG(s8 val) @@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, err = kstrtoul(buf, 10, &val); if (err) return err; + if (val > 255) + return -EINVAL; data->vrm = val; return count; diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 09de4fd12d57..4d75d4759709 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -22,7 +22,6 @@ * */ #include <linux/clk.h> -#include <linux/module.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/interrupt.h> diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index f7f9865b8b89..f6d313e528de 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -40,6 +40,7 @@ config I2C_MUX_PCA9541 config I2C_MUX_PCA954x tristate "Philips PCA954x I2C Mux/switches" + depends on GPIOLIB help If you say yes here you get support for the Philips PCA954x I2C mux/switch devices. 
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 8fb46aab2d87..a04c49f2a011 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -416,6 +416,7 @@ config BLK_DEV_CY82C693 config BLK_DEV_CS5520 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help Include support for PIO tuning and virtual DMA on the Cyrix MediaGX @@ -426,6 +427,7 @@ config BLK_DEV_CS5520 config BLK_DEV_CS5530 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help Include support for UDMA on the Cyrix MediaGX 5530 chipset. This @@ -435,7 +437,7 @@ config BLK_DEV_CS5530 config BLK_DEV_CS5535 tristate "AMD CS5535 chipset support" - depends on X86 && !X86_64 + depends on X86_32 select BLK_DEV_IDEDMA_PCI help Include support for UDMA on the NSC/AMD CS5535 companion chipset. @@ -486,6 +488,7 @@ config BLK_DEV_JMICRON config BLK_DEV_SC1200 tristate "National SCx200 chipset support" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help This driver adds support for the on-board IDE controller on the diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 2a744a91370e..a3d3b1733c49 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif) if (irq_handler == NULL) irq_handler = ide_intr; - if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) - goto out_up; + if (!host->get_lock) + if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) + goto out_up; #if !defined(__mc68000__) printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, @@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif) ide_proc_unregister_port(hwif); - free_irq(hwif->irq, hwif); + if (!hwif->host->get_lock) + free_irq(hwif->irq, hwif); device_unregister(hwif->portdev); device_unregister(&hwif->gendev); diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index 69abf9163df7..54e464e4bb72 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, struct accel_3d_state *accel_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &accel_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &accel_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 17aeea170566..2a5fa9a436e5 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = { {6, 250000}, {1, 560000} }; +/* + * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048 + * The userspace interface uses m/s^2 and we declare micro units + * So scale factor is given by: + * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665 + */ static const int mma8452_scales[3][2] = { - {0, 977}, {0, 1953}, {0, 3906} + 
{0, 9577}, {0, 19154}, {0, 38307} }; static ssize_t mma8452_show_samp_freq_avail(struct device *dev, diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 39b4cb48d738..6eba301ee03d 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev, int ret; struct ad799x_state *st = iio_priv(indio_dev); + if (val < 0 || val > RES_MASK(chan->scan_type.realbits)) + return -EINVAL; + mutex_lock(&indio_dev->mlock); ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info), - val); + val << chan->scan_type.shift); mutex_unlock(&indio_dev->mlock); return ret; @@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; - *val = valin; + *val = (valin >> chan->scan_type.shift) & + RES_MASK(chan->scan_type.realbits); return IIO_VAL_INT; } diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index a4db3026bec6..d5dc4c6ce86c 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, return -EAGAIN; } } - map_val = chan->channel + TOTAL_CHANNELS; + map_val = adc_dev->channel_step[chan->scan_index]; /* * We check the complete FIFO. We programmed just one entry but in case diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index 40f4e4935d0d..fa034a3dad78 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, struct gyro_3d_state *gyro_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &gyro_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &gyro_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 258a973a1fb8..bfbf4d419f41 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev, &indio_dev->event_interface->dev_attr_list); kfree(postfix); + if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) + continue; + if (ret) return ret; diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index d833d55052ea..c7497009d60a 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, else if (name && index >= 0) { pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", np->full_name, name ? 
name : "", index); - return chan; + return NULL; } /* @@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, */ np = np->parent; if (np && !of_get_property(np, "io-channel-ranges", NULL)) - break; + return NULL; } + return chan; } @@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev, if (channel != NULL) return channel; } + return iio_channel_get_sys(name, channel_name); } EXPORT_SYMBOL_GPL(iio_channel_get); diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index f34c94380b41..96e71e103ea7 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev, struct als_state *als_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &als_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &als_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index d203ef4d892f..412bae86d6ae 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev, struct prox_state *prox_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &prox_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &prox_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c index fe063a0a21cd..752569985d1d 100644 --- a/drivers/iio/light/tcs3472.c +++ b/drivers/iio/light/tcs3472.c @@ -52,6 +52,7 @@ struct tcs3472_data { struct i2c_client *client; + struct mutex lock; u8 enable; u8 control; u8 atime; @@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: + if (iio_buffer_enabled(indio_dev)) + return -EBUSY; + + mutex_lock(&data->lock); ret = tcs3472_req_data(data); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&data->lock); return ret; + } ret = i2c_smbus_read_word_data(data->client, chan->address); + mutex_unlock(&data->lock); if (ret < 0) return ret; *val = ret; @@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client, data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; + mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; indio_dev->info = &tcs3472_info; diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index 41cf29e2a371..b2b0937d5133 100644 --- 
a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev, struct magn_3d_state *magn_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &magn_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &magn_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c index 1cd190c73788..2c0d2a4fed8c 100644 --- a/drivers/iio/pressure/hid-sensor-press.c +++ b/drivers/iio/pressure/hid-sensor-press.c @@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev, struct press_state *press_state = iio_priv(indio_dev); int report_id = -1; u32 address; - int ret; int ret_type; s32 poll_value; @@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: - ret = hid_sensor_read_samp_freq_value( + ret_type = hid_sensor_read_samp_freq_value( &press_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_HYSTERESIS: - ret = hid_sensor_read_raw_hyst_value( + ret_type = hid_sensor_read_raw_hyst_value( &press_state->common_attributes, val, val2); - ret_type = IIO_VAL_INT_PLUS_MICRO; break; default: ret_type = -EINVAL; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 5e153f6d4b48..768a0fb67dd6 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb) */ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) { + struct c4iw_ep *ep = handle; + printk(KERN_ERR MOD "ARP failure duing connect\n"); kfree_skb(skb); + connect_reply_upcall(ep, -EHOSTUNREACH); + state_set(&ep->com, DEAD); + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); + cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); + dst_release(ep->dst); + cxgb4_l2t_release(ep->l2t); + c4iw_put_ep(&ep->com); } /* @@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep) opt2 |= T5_OPT_2_VALID; opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); } - t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); + t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { if (ep->com.remote_addr.ss_family == AF_INET) { @@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); BUG_ON(skb_cloned(skb)); skb_trim(skb, sizeof(struct cpl_tid_release)); - skb_get(skb); release_tid(&dev->rdev, hwtid, skb); return; } @@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void) return 0; } -void __exit c4iw_cm_term(void) +void c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); flush_workqueue(workq); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index dd93aadc996e..7db82b24302b 100644 --- 
a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) pr_err(MOD "error allocating status page\n"); goto err4; } + rdev->status_page->db_off = 0; return 0; err4: c4iw_rqtpool_destroy(rdev); @@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx) if (ctx->dev->rdev.oc_mw_kva) iounmap(ctx->dev->rdev.oc_mw_kva); ib_dealloc_device(&ctx->dev->ibdev); - iwpm_exit(RDMA_NL_C4IW); ctx->dev = NULL; } @@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) setup_debugfs(devp); } - ret = iwpm_init(RDMA_NL_C4IW); - if (ret) { - pr_err("port mapper initialization failed with %d\n", ret); - ib_dealloc_device(&devp->ibdev); - return ERR_PTR(ret); - } return devp; } @@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void) pr_err("%s[%u]: Failed to add netlink callback\n" , __func__, __LINE__); + err = iwpm_init(RDMA_NL_C4IW); + if (err) { + pr_err("port mapper initialization failed with %d\n", err); + ibnl_remove_client(RDMA_NL_C4IW); + c4iw_cm_term(); + debugfs_remove_recursive(c4iw_debugfs_root); + return err; + } + cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); return 0; @@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void) } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); + iwpm_exit(RDMA_NL_C4IW); ibnl_remove_client(RDMA_NL_C4IW); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 125bc5d1e175..361fff7a0742 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); int c4iw_register_device(struct c4iw_dev *dev); void c4iw_unregister_device(struct c4iw_dev *dev); int __init c4iw_cm_init(void); -void __exit c4iw_cm_term(void); +void c4iw_cm_term(void); void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index d13ddf1c0033..bbbcf389272c 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, int err; uuari = &dev->mdev.priv.uuari; - if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) + if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) diff --git a/drivers/input/input.c b/drivers/input/input.c index 1c4c0db05550..29ca0bb4f561 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev, } static int input_get_disposition(struct input_dev *dev, - unsigned int type, unsigned int code, int value) + unsigned int type, unsigned int code, int *pval) { int disposition = INPUT_IGNORE_EVENT; + int value = *pval; switch (type) { @@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev, break; } + *pval = value; return disposition; } @@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev, { int disposition; - disposition = input_get_disposition(dev, type, code, value); + disposition = input_get_disposition(dev, type, code, &value); if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); diff --git 
a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index 758b48731415..de7be4f03d91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int keyscan_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev) mutex_unlock(&input->mutex); return retval; } +#endif static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c index e4104f9b2e6d..fed5102e1802 100644 --- a/drivers/input/misc/sirfsoc-onkey.c +++ b/drivers/input/misc/sirfsoc-onkey.c @@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = { module_platform_driver(sirfsoc_pwrc_driver); -MODULE_LICENSE("GPLv2"); +MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); MODULE_ALIAS("platform:sirfsoc-pwrc"); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ec772d962f06..ef9e0b8a9aa7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = { 1232, 5710, 1156, 4696 }, { - (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, + (const char * const []){"LEN0034", "LEN0036", "LEN2002", + "LEN2004", NULL}, 1024, 5112, 2024, 4832 }, { @@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0049", "LEN2000", "LEN2001", /* Edge E431 */ - "LEN2002", + "LEN2002", /* Edge E531 */ "LEN2003", "LEN2004", /* L440 */ "LEN2005", diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 381b20d4c561..136b7b204f56 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Acer Aspire 5710 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), + }, + }, + { /* Gericom Bellagio */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 977d05cd9e2e..e73cf2c71f35 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) * a=(pi*r^2)/C. 
*/ int a = data[5]; - int x_res = input_abs_get_res(input, ABS_X); - int y_res = input_abs_get_res(input, ABS_Y); - width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); + int x_res = input_abs_get_res(input, ABS_MT_POSITION_X); + int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y); + width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); height = width * y_res / x_res; } @@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev, input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); } else { - if (features->touch_max <= 2) { + if (features->touch_max == 1) { input_set_abs_params(input_dev, ABS_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, @@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case MTTPC: case MTTPC_B: case TABLETPC2FG: - if (features->device_type == BTN_TOOL_FINGER) { - unsigned int flags = INPUT_MT_DIRECT; - - if (wacom_wac->features.type == TABLETPC2FG) - flags = 0; - - input_mt_init_slots(input_dev, features->touch_max, flags); - } + if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1) + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); /* fall through */ case TABLETPC: @@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_RIGHT, input_dev->keybit); if (features->touch_max) { - /* touch interface */ - unsigned int flags = INPUT_MT_POINTER; - - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, @@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); - } else { - __set_bit(BTN_TOOL_FINGER, input_dev->keybit); - __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); - flags = 0; } - input_mt_init_slots(input_dev, features->touch_max, flags); + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); } else { /* buttons/keys only interface */ __clear_bit(ABS_X, input_dev->absbit); diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 4e793a17361f..2ce649520fe0 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev, */ err = of_property_read_u32(node, "ti,coordinate-readouts", &ts_dev->coordinate_readouts); - if (err < 0) + if (err < 0) { + dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n"); err = of_property_read_u32(node, "ti,coordiante-readouts", &ts_dev->coordinate_readouts); + } + if (err < 0) return err; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 154e5a838257..dd5112265cc9 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -76,7 +76,7 @@ config AMD_IOMMU_STATS config AMD_IOMMU_V2 tristate "AMD IOMMU Version 2 driver" - depends on AMD_IOMMU && PROFILING + depends on AMD_IOMMU select MMU_NOTIFIER ---help--- This option enables support for the AMD IOMMUv2 features of the IOMMU @@ -176,6 +176,7 @@ config EXYNOS_IOMMU bool "Exynos IOMMU Support" depends on ARCH_EXYNOS select IOMMU_API + select ARM_DMA_USE_IOMMU help Support for the IOMMU (System MMU) of Samsung Exynos application processor family. 
This enables H/W multimedia accelerators to see diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 6a4a00ef088b..16edef74b8ee 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o +obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4aec6a29e316..18405314168b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -46,7 +46,6 @@ #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" -#include "pci.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) @@ -81,7 +80,7 @@ LIST_HEAD(hpet_map); */ static struct protection_domain *pt_domain; -static struct iommu_ops amd_iommu_ops; +static const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; @@ -133,9 +132,6 @@ static void free_dev_data(struct iommu_dev_data *dev_data) list_del(&dev_data->dev_data_list); spin_unlock_irqrestore(&dev_data_list_lock, flags); - if (dev_data->group) - iommu_group_put(dev_data->group); - kfree(dev_data); } @@ -264,167 +260,79 @@ static bool check_device(struct device *dev) return true; } -static struct pci_bus *find_hosted_bus(struct pci_bus *bus) -{ - while (!bus->self) { - if (!pci_is_root_bus(bus)) - bus = bus->parent; - else - return ERR_PTR(-ENODEV); - } - - return bus; -} - -#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) - -static struct pci_dev *get_isolation_root(struct pci_dev *pdev) -{ - struct pci_dev *dma_pdev = pdev; - - /* Account for quirked devices */ - swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); - - /* - * If it's a multifunction device that does not support our - * required ACS flags, add to the same group as lowest numbered - * function that also does not suport the required ACS flags. - */ - if (dma_pdev->multifunction && - !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { - u8 i, slot = PCI_SLOT(dma_pdev->devfn); - - for (i = 0; i < 8; i++) { - struct pci_dev *tmp; - - tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); - if (!tmp) - continue; - - if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { - swap_pci_ref(&dma_pdev, tmp); - break; - } - pci_dev_put(tmp); - } - } - - /* - * Devices on the root bus go through the iommu. If that's not us, - * find the next upstream device and test ACS up to the root bus. - * Finding the next device may require skipping virtual buses. 
- */ - while (!pci_is_root_bus(dma_pdev->bus)) { - struct pci_bus *bus = find_hosted_bus(dma_pdev->bus); - if (IS_ERR(bus)) - break; - - if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) - break; - - swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); - } - - return dma_pdev; -} - -static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev) +static int init_iommu_group(struct device *dev) { - struct iommu_group *group = iommu_group_get(&pdev->dev); - int ret; + struct iommu_group *group; - if (!group) { - group = iommu_group_alloc(); - if (IS_ERR(group)) - return PTR_ERR(group); + group = iommu_group_get_for_dev(dev); - WARN_ON(&pdev->dev != dev); - } + if (IS_ERR(group)) + return PTR_ERR(group); - ret = iommu_group_add_device(group, dev); iommu_group_put(group); - return ret; + return 0; } -static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data, - struct device *dev) +static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) { - if (!dev_data->group) { - struct iommu_group *group = iommu_group_alloc(); - if (IS_ERR(group)) - return PTR_ERR(group); - - dev_data->group = group; - } - - return iommu_group_add_device(dev_data->group, dev); + *(u16 *)data = alias; + return 0; } -static int init_iommu_group(struct device *dev) +static u16 get_alias(struct device *dev) { - struct iommu_dev_data *dev_data; - struct iommu_group *group; - struct pci_dev *dma_pdev; - int ret; - - group = iommu_group_get(dev); - if (group) { - iommu_group_put(group); - return 0; - } - - dev_data = find_dev_data(get_device_id(dev)); - if (!dev_data) - return -ENOMEM; + struct pci_dev *pdev = to_pci_dev(dev); + u16 devid, ivrs_alias, pci_alias; - if (dev_data->alias_data) { - u16 alias; - struct pci_bus *bus; + devid = get_device_id(dev); + ivrs_alias = amd_iommu_alias_table[devid]; + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); - if (dev_data->alias_data->group) - goto use_group; + if (ivrs_alias == pci_alias) + return ivrs_alias; - /* - * If the alias device exists, it's effectively just a first - * level quirk for finding the DMA source. - */ - alias = amd_iommu_alias_table[dev_data->devid]; - dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); - if (dma_pdev) { - dma_pdev = get_isolation_root(dma_pdev); - goto use_pdev; + /* + * DMA alias showdown + * + * The IVRS is fairly reliable in telling us about aliases, but it + * can't know about every screwy device. If we don't have an IVRS + * reported alias, use the PCI reported alias. In that case we may + * still need to initialize the rlookup and dev_table entries if the + * alias is to a non-existent device. + */ + if (ivrs_alias == devid) { + if (!amd_iommu_rlookup_table[pci_alias]) { + amd_iommu_rlookup_table[pci_alias] = + amd_iommu_rlookup_table[devid]; + memcpy(amd_iommu_dev_table[pci_alias].data, + amd_iommu_dev_table[devid].data, + sizeof(amd_iommu_dev_table[pci_alias].data)); } - /* - * If the alias is virtual, try to find a parent device - * and test whether the IOMMU group is actualy rooted above - * the alias. Be careful to also test the parent device if - * we think the alias is the root of the group. 
- */ - bus = pci_find_bus(0, alias >> 8); - if (!bus) - goto use_group; - - bus = find_hosted_bus(bus); - if (IS_ERR(bus) || !bus->self) - goto use_group; + return pci_alias; + } - dma_pdev = get_isolation_root(pci_dev_get(bus->self)); - if (dma_pdev != bus->self || (dma_pdev->multifunction && - !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))) - goto use_pdev; + pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d " + "for device %s[%04x:%04x], kernel reported alias " + "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), + PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device, + PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), + PCI_FUNC(pci_alias)); - pci_dev_put(dma_pdev); - goto use_group; + /* + * If we don't have a PCI DMA alias and the IVRS alias is on the same + * bus, then the IVRS table may know about a quirk that we don't. + */ + if (pci_alias == devid && + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { + pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; + pdev->dma_alias_devfn = ivrs_alias & 0xff; + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), + dev_name(dev)); } - dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev))); -use_pdev: - ret = use_pdev_iommu_group(dma_pdev, dev); - pci_dev_put(dma_pdev); - return ret; -use_group: - return use_dev_data_iommu_group(dev_data->alias_data, dev); + return ivrs_alias; } static int iommu_init_device(struct device *dev) @@ -441,7 +349,8 @@ static int iommu_init_device(struct device *dev) if (!dev_data) return -ENOMEM; - alias = amd_iommu_alias_table[dev_data->devid]; + alias = get_alias(dev); + if (alias != dev_data->devid) { struct iommu_dev_data *alias_data; @@ -470,6 +379,9 @@ static int iommu_init_device(struct device *dev) dev->archdata.iommu = dev_data; + iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, + dev); + return 0; } @@ -489,12 +401,22 @@ static void iommu_ignore_device(struct device *dev) static void iommu_uninit_device(struct device *dev) { + struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev)); + + if (!dev_data) + return; + + iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, + dev); + iommu_group_remove_device(dev); + /* Unlink from alias, it may change if another device is re-plugged */ + dev_data->alias_data = NULL; + /* - * Nothing to do here - we keep dev_data around for unplugged devices - * and reuse it when the device is re-plugged - not doing so would - * introduce a ton of races. + * We keep dev_data around for unplugged devices and reuse it when the + * device is re-plugged - not doing so would introduce a ton of races. 
*/ } @@ -3473,7 +3395,7 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain, return 0; } -static struct iommu_ops amd_iommu_ops = { +static const struct iommu_ops amd_iommu_ops = { .domain_init = amd_iommu_domain_init, .domain_destroy = amd_iommu_domain_destroy, .attach_dev = amd_iommu_attach_device, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 0e08545d7298..3783e0b44df6 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -26,6 +26,7 @@ #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> +#include <linux/iommu.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> @@ -1197,6 +1198,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) iommu->max_counters = (u8) ((val >> 7) & 0xf); } +static ssize_t amd_iommu_show_cap(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amd_iommu *iommu = dev_get_drvdata(dev); + return sprintf(buf, "%x\n", iommu->cap); +} +static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); + +static ssize_t amd_iommu_show_features(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amd_iommu *iommu = dev_get_drvdata(dev); + return sprintf(buf, "%llx\n", iommu->features); +} +static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); + +static struct attribute *amd_iommu_attrs[] = { + &dev_attr_cap.attr, + &dev_attr_features.attr, + NULL, +}; + +static struct attribute_group amd_iommu_group = { + .name = "amd-iommu", + .attrs = amd_iommu_attrs, +}; + +static const struct attribute_group *amd_iommu_groups[] = { + &amd_iommu_group, + NULL, +}; static int iommu_init_pci(struct amd_iommu *iommu) { @@ -1297,6 +1331,10 @@ static int iommu_init_pci(struct amd_iommu *iommu) amd_iommu_erratum_746_workaround(iommu); + iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, + amd_iommu_groups, "ivhd%d", + iommu->index); + return pci_enable_device(iommu->dev); } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index f1a5abf11acf..8e43b7cba133 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -390,12 +390,6 @@ struct amd_iommu_fault { }; -#define PPR_FAULT_EXEC (1 << 1) -#define PPR_FAULT_READ (1 << 2) -#define PPR_FAULT_WRITE (1 << 5) -#define PPR_FAULT_USER (1 << 6) -#define PPR_FAULT_RSVD (1 << 7) -#define PPR_FAULT_GN (1 << 8) struct iommu_domain; @@ -432,7 +426,6 @@ struct iommu_dev_data { struct iommu_dev_data *alias_data;/* The alias dev_data */ struct protection_domain *domain; /* Domain the device is bound to */ atomic_t bind; /* Domain attach reference count */ - struct iommu_group *group; /* IOMMU group for virtual aliases */ u16 devid; /* PCI Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ bool passthrough; /* Default for device is pt_domain */ @@ -578,6 +571,9 @@ struct amd_iommu { /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; + /* IOMMU sysfs device */ + struct device *iommu_dev; + /* * We can't rely on the BIOS to restore all values on reinit, so we * need to stash them diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 499b4366a98d..5f578e850fc5 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -47,12 +47,13 @@ struct pasid_state { atomic_t count; /* Reference count */ unsigned mmu_notifier_count; /* Counting nested mmu_notifier calls */ - struct task_struct *task; /* Task bound to 
this PASID */ struct mm_struct *mm; /* mm_struct for the faults */ - struct mmu_notifier mn; /* mmu_otifier handle */ + struct mmu_notifier mn; /* mmu_notifier handle */ struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ struct device_state *device_state; /* Link to our device_state */ int pasid; /* PASID index */ + bool invalid; /* Used during setup and + teardown of the pasid */ spinlock_t lock; /* Protect pri_queues and mmu_notifer_count */ wait_queue_head_t wq; /* To wait for count == 0 */ @@ -99,7 +100,6 @@ static struct workqueue_struct *iommu_wq; static u64 *empty_page_table; static void free_pasid_states(struct device_state *dev_state); -static void unbind_pasid(struct device_state *dev_state, int pasid); static u16 device_id(struct pci_dev *pdev) { @@ -297,37 +297,29 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) schedule(); finish_wait(&pasid_state->wq, &wait); - mmput(pasid_state->mm); free_pasid_state(pasid_state); } -static void __unbind_pasid(struct pasid_state *pasid_state) +static void unbind_pasid(struct pasid_state *pasid_state) { struct iommu_domain *domain; domain = pasid_state->device_state->domain; + /* + * Mark pasid_state as invalid, no more faults will we added to the + * work queue after this is visible everywhere. + */ + pasid_state->invalid = true; + + /* Make sure this is visible */ + smp_wmb(); + + /* After this the device/pasid can't access the mm anymore */ amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); - clear_pasid_state(pasid_state->device_state, pasid_state->pasid); /* Make sure no more pending faults are in the queue */ flush_workqueue(iommu_wq); - - mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); - - put_pasid_state(pasid_state); /* Reference taken in bind() function */ -} - -static void unbind_pasid(struct device_state *dev_state, int pasid) -{ - struct pasid_state *pasid_state; - - pasid_state = get_pasid_state(dev_state, pasid); - if (pasid_state == NULL) - return; - - __unbind_pasid(pasid_state); - put_pasid_state_wait(pasid_state); /* Reference taken in this function */ } static void free_pasid_states_level1(struct pasid_state **tbl) @@ -373,6 +365,12 @@ static void free_pasid_states(struct device_state *dev_state) * unbind the PASID */ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); + + put_pasid_state_wait(pasid_state); /* Reference taken in + amd_iommu_bind_pasid */ + + /* Drop reference taken in amd_iommu_bind_pasid */ + put_device_state(dev_state); } if (dev_state->pasid_levels == 2) @@ -411,14 +409,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn, return 0; } -static void mn_change_pte(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address, - pte_t pte) -{ - __mn_flush_page(mn, address); -} - static void mn_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) @@ -472,22 +462,23 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct pasid_state *pasid_state; struct device_state *dev_state; + bool run_inv_ctx_cb; might_sleep(); - pasid_state = mn_to_state(mn); - dev_state = pasid_state->device_state; + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + run_inv_ctx_cb = !pasid_state->invalid; - if (pasid_state->device_state->inv_ctx_cb) + if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb) dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); - unbind_pasid(dev_state, pasid_state->pasid); + unbind_pasid(pasid_state); } static struct mmu_notifier_ops 
iommu_mn = { .release = mn_release, .clear_flush_young = mn_clear_flush_young, - .change_pte = mn_change_pte, .invalidate_page = mn_invalidate_page, .invalidate_range_start = mn_invalidate_range_start, .invalidate_range_end = mn_invalidate_range_end, @@ -529,7 +520,7 @@ static void do_fault(struct work_struct *work) write = !!(fault->flags & PPR_FAULT_WRITE); down_read(&fault->state->mm->mmap_sem); - npages = get_user_pages(fault->state->task, fault->state->mm, + npages = get_user_pages(NULL, fault->state->mm, fault->address, 1, write, 0, &page, NULL); up_read(&fault->state->mm->mmap_sem); @@ -587,7 +578,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) goto out; pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); - if (pasid_state == NULL) { + if (pasid_state == NULL || pasid_state->invalid) { /* We know the device but not the PASID -> send INVALID */ amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, PPR_INVALID, tag); @@ -612,6 +603,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) fault->state = pasid_state; fault->tag = tag; fault->finish = finish; + fault->pasid = iommu_fault->pasid; fault->flags = iommu_fault->flags; INIT_WORK(&fault->work, do_fault); @@ -620,6 +612,10 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) ret = NOTIFY_OK; out_drop_state: + + if (ret != NOTIFY_OK && pasid_state) + put_pasid_state(pasid_state); + put_device_state(dev_state); out: @@ -635,6 +631,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, { struct pasid_state *pasid_state; struct device_state *dev_state; + struct mm_struct *mm; u16 devid; int ret; @@ -658,20 +655,23 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, if (pasid_state == NULL) goto out; + atomic_set(&pasid_state->count, 1); init_waitqueue_head(&pasid_state->wq); spin_lock_init(&pasid_state->lock); - pasid_state->task = task; - pasid_state->mm = get_task_mm(task); + mm = get_task_mm(task); + pasid_state->mm = mm; pasid_state->device_state = dev_state; pasid_state->pasid = pasid; + pasid_state->invalid = true; /* Mark as valid only if we are + done with setting up the pasid */ pasid_state->mn.ops = &iommu_mn; if (pasid_state->mm == NULL) goto out_free; - mmu_notifier_register(&pasid_state->mn, pasid_state->mm); + mmu_notifier_register(&pasid_state->mn, mm); ret = set_pasid_state(dev_state, pasid_state, pasid); if (ret) @@ -682,15 +682,26 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, if (ret) goto out_clear_state; + /* Now we are ready to handle faults */ + pasid_state->invalid = false; + + /* + * Drop the reference to the mm_struct here. We rely on the + * mmu_notifier release call-back to inform us when the mm + * is going away. 
+ */ + mmput(mm); + return 0; out_clear_state: clear_pasid_state(dev_state, pasid); out_unregister: - mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); + mmu_notifier_unregister(&pasid_state->mn, mm); out_free: + mmput(mm); free_pasid_state(pasid_state); out: @@ -728,10 +739,22 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) */ put_pasid_state(pasid_state); - /* This will call the mn_release function and unbind the PASID */ + /* Clear the pasid state so that the pasid can be re-used */ + clear_pasid_state(dev_state, pasid_state->pasid); + + /* + * Call mmu_notifier_unregister to drop our reference + * to pasid_state->mm + */ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); + put_pasid_state_wait(pasid_state); /* Reference taken in + amd_iommu_bind_pasid */ out: + /* Drop reference taken in this function */ + put_device_state(dev_state); + + /* Drop reference taken in amd_iommu_bind_pasid */ put_device_state(dev_state); } EXPORT_SYMBOL(amd_iommu_unbind_pasid); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1599354e974d..ca18d6d42a9b 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -39,6 +39,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -316,9 +317,9 @@ #define FSR_AFF (1 << 2) #define FSR_TF (1 << 1) -#define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | \ - FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ +#define FSR_IGN (FSR_AFF | FSR_ASF | \ + FSR_TLBMCF | FSR_TLBLKF) +#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ FSR_EF | FSR_PF | FSR_TF | FSR_IGN) #define FSYNR0_WNR (1 << 4) @@ -329,27 +330,20 @@ struct arm_smmu_smr { u16 id; }; -struct arm_smmu_master { - struct device_node *of_node; - - /* - * The following is specific to the master's position in the - * SMMU chain. - */ - struct rb_node node; +struct arm_smmu_master_cfg { int num_streamids; u16 streamids[MAX_MASTER_STREAMIDS]; - - /* - * We only need to allocate these on the root SMMU, as we - * configure unmatched streams to bypass translation. - */ struct arm_smmu_smr *smrs; }; +struct arm_smmu_master { + struct device_node *of_node; + struct rb_node node; + struct arm_smmu_master_cfg cfg; +}; + struct arm_smmu_device { struct device *dev; - struct device_node *parent_of_node; void __iomem *base; unsigned long size; @@ -387,7 +381,6 @@ struct arm_smmu_device { }; struct arm_smmu_cfg { - struct arm_smmu_device *smmu; u8 cbndx; u8 irptndx; u32 cbar; @@ -399,15 +392,8 @@ struct arm_smmu_cfg { #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) struct arm_smmu_domain { - /* - * A domain can span across multiple, chained SMMUs and requires - * all devices within the domain to follow the same translation - * path. 
- */ - struct arm_smmu_device *leaf_smmu; - struct arm_smmu_cfg root_cfg; - phys_addr_t output_mask; - + struct arm_smmu_device *smmu; + struct arm_smmu_cfg cfg; spinlock_t lock; }; @@ -419,7 +405,7 @@ struct arm_smmu_option_prop { const char *prop; }; -static struct arm_smmu_option_prop arm_smmu_options [] = { +static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, { 0, NULL}, }; @@ -427,6 +413,7 @@ static struct arm_smmu_option_prop arm_smmu_options [] = { static void parse_driver_options(struct arm_smmu_device *smmu) { int i = 0; + do { if (of_property_read_bool(smmu->dev->of_node, arm_smmu_options[i].prop)) { @@ -437,6 +424,19 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } while (arm_smmu_options[++i].opt); } +static struct device *dev_get_master_dev(struct device *dev) +{ + if (dev_is_pci(dev)) { + struct pci_bus *bus = to_pci_dev(dev)->bus; + + while (!pci_is_root_bus(bus)) + bus = bus->parent; + return bus->bridge->parent; + } + + return dev; +} + static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, struct device_node *dev_node) { @@ -444,6 +444,7 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, while (node) { struct arm_smmu_master *master; + master = container_of(node, struct arm_smmu_master, node); if (dev_node < master->of_node) @@ -457,6 +458,18 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, return NULL; } +static struct arm_smmu_master_cfg * +find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev) +{ + struct arm_smmu_master *master; + + if (dev_is_pci(dev)) + return dev->archdata.iommu; + + master = find_smmu_master(smmu, dev->of_node); + return master ? 
&master->cfg : NULL; +} + static int insert_smmu_master(struct arm_smmu_device *smmu, struct arm_smmu_master *master) { @@ -465,8 +478,8 @@ static int insert_smmu_master(struct arm_smmu_device *smmu, new = &smmu->masters.rb_node; parent = NULL; while (*new) { - struct arm_smmu_master *this; - this = container_of(*new, struct arm_smmu_master, node); + struct arm_smmu_master *this + = container_of(*new, struct arm_smmu_master, node); parent = *new; if (master->of_node < this->of_node) @@ -508,33 +521,30 @@ static int register_smmu_master(struct arm_smmu_device *smmu, if (!master) return -ENOMEM; - master->of_node = masterspec->np; - master->num_streamids = masterspec->args_count; + master->of_node = masterspec->np; + master->cfg.num_streamids = masterspec->args_count; - for (i = 0; i < master->num_streamids; ++i) - master->streamids[i] = masterspec->args[i]; + for (i = 0; i < master->cfg.num_streamids; ++i) + master->cfg.streamids[i] = masterspec->args[i]; return insert_smmu_master(smmu, master); } -static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu) +static struct arm_smmu_device *find_smmu_for_device(struct device *dev) { - struct arm_smmu_device *parent; - - if (!smmu->parent_of_node) - return NULL; + struct arm_smmu_device *smmu; + struct arm_smmu_master *master = NULL; + struct device_node *dev_node = dev_get_master_dev(dev)->of_node; spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(parent, &arm_smmu_devices, list) - if (parent->dev->of_node == smmu->parent_of_node) - goto out_unlock; - - parent = NULL; - dev_warn(smmu->dev, - "Failed to find SMMU parent despite parent in DT\n"); -out_unlock: + list_for_each_entry(smmu, &arm_smmu_devices, list) { + master = find_smmu_master(smmu, dev_node); + if (master) + break; + } spin_unlock(&arm_smmu_devices_lock); - return parent; + + return master ? 
smmu : NULL; } static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) @@ -574,9 +584,10 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) } } -static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg) +static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_device *smmu = cfg->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *base = ARM_SMMU_GR0(smmu); bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; @@ -600,11 +611,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) unsigned long iova; struct iommu_domain *domain = dev; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *cb_base; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); if (!(fsr & FSR_FAULT)) @@ -631,7 +642,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) } else { dev_err_ratelimited(smmu->dev, "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", - iova, fsynr, root_cfg->cbndx); + iova, fsynr, cfg->cbndx); ret = IRQ_NONE; resume = RESUME_TERMINATE; } @@ -696,19 +707,19 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) { u32 reg; bool stage1; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *cb_base, *gr0_base, *gr1_base; gr0_base = ARM_SMMU_GR0(smmu); gr1_base = ARM_SMMU_GR1(smmu); - stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); + stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); /* CBAR */ - reg = root_cfg->cbar; + reg = cfg->cbar; if (smmu->version == 1) - reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; + reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; /* * Use the weakest shareability/memory types, so they are @@ -718,9 +729,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); } else { - reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; + reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT; } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); if (smmu->version > 1) { /* CBA2R */ @@ -730,7 +741,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg = CBA2R_RW64_32BIT; #endif writel_relaxed(reg, - gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx)); + gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); /* TTBCR2 */ switch (smmu->input_size) { @@ -780,13 +791,13 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) } /* TTBR0 */ - arm_smmu_flush_pgtable(smmu, root_cfg->pgd, + arm_smmu_flush_pgtable(smmu, cfg->pgd, PTRS_PER_PGD * sizeof(pgd_t)); - reg = __pa(root_cfg->pgd); + reg = __pa(cfg->pgd); writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); - reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; + reg = 
(phys_addr_t)__pa(cfg->pgd) >> 32; if (stage1) - reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT; + reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); /* @@ -800,6 +811,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg = TTBCR_TG0_64K; if (!stage1) { + reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; + switch (smmu->s2_output_size) { case 32: reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); @@ -821,7 +834,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) break; } } else { - reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; + reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT; } } else { reg = 0; @@ -853,44 +866,25 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) } static int arm_smmu_init_domain_context(struct iommu_domain *domain, - struct device *dev) + struct arm_smmu_device *smmu) { int irq, ret, start; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu, *parent; - - /* - * Walk the SMMU chain to find the root device for this chain. - * We assume that no masters have translations which terminate - * early, and therefore check that the root SMMU does indeed have - * a StreamID for the master in question. - */ - parent = dev->archdata.iommu; - smmu_domain->output_mask = -1; - do { - smmu = parent; - smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1; - } while ((parent = find_parent_smmu(smmu))); - - if (!find_smmu_master(smmu, dev->of_node)) { - dev_err(dev, "unable to find root SMMU for device\n"); - return -ENODEV; - } + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { /* * We will likely want to change this if/when KVM gets * involved. 
*/ - root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; + cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; start = smmu->num_s2_context_banks; - } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) { - root_cfg->cbar = CBAR_TYPE_S2_TRANS; - start = 0; - } else { - root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; + } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { + cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; start = smmu->num_s2_context_banks; + } else { + cfg->cbar = CBAR_TYPE_S2_TRANS; + start = 0; } ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, @@ -898,38 +892,38 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (IS_ERR_VALUE(ret)) return ret; - root_cfg->cbndx = ret; + cfg->cbndx = ret; if (smmu->version == 1) { - root_cfg->irptndx = atomic_inc_return(&smmu->irptndx); - root_cfg->irptndx %= smmu->num_context_irqs; + cfg->irptndx = atomic_inc_return(&smmu->irptndx); + cfg->irptndx %= smmu->num_context_irqs; } else { - root_cfg->irptndx = root_cfg->cbndx; + cfg->irptndx = cfg->cbndx; } - irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, "arm-smmu-context-fault", domain); if (IS_ERR_VALUE(ret)) { dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", - root_cfg->irptndx, irq); - root_cfg->irptndx = INVALID_IRPTNDX; + cfg->irptndx, irq); + cfg->irptndx = INVALID_IRPTNDX; goto out_free_context; } - root_cfg->smmu = smmu; + smmu_domain->smmu = smmu; arm_smmu_init_context_bank(smmu_domain); - return ret; + return 0; out_free_context: - __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); return ret; } static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; void __iomem *cb_base; int irq; @@ -937,16 +931,16 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) return; /* Disable the context bank and nuke the TLB before freeing it. 
*/ - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); - arm_smmu_tlb_inv_context(root_cfg); + arm_smmu_tlb_inv_context(smmu_domain); - if (root_cfg->irptndx != INVALID_IRPTNDX) { - irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; + if (cfg->irptndx != INVALID_IRPTNDX) { + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; free_irq(irq, domain); } - __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); } static int arm_smmu_domain_init(struct iommu_domain *domain) @@ -963,10 +957,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) if (!smmu_domain) return -ENOMEM; - pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); if (!pgd) goto out_free_domain; - smmu_domain->root_cfg.pgd = pgd; + smmu_domain->cfg.pgd = pgd; spin_lock_init(&smmu_domain->lock); domain->priv = smmu_domain; @@ -980,6 +974,7 @@ out_free_domain: static void arm_smmu_free_ptes(pmd_t *pmd) { pgtable_t table = pmd_pgtable(*pmd); + pgtable_page_dtor(table); __free_page(table); } @@ -1021,8 +1016,8 @@ static void arm_smmu_free_puds(pgd_t *pgd) static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) { int i; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - pgd_t *pgd, *pgd_base = root_cfg->pgd; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + pgd_t *pgd, *pgd_base = cfg->pgd; /* * Recursively free the page tables for this domain. We don't @@ -1054,7 +1049,7 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain) } static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, - struct arm_smmu_master *master) + struct arm_smmu_master_cfg *cfg) { int i; struct arm_smmu_smr *smrs; @@ -1063,18 +1058,18 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) return 0; - if (master->smrs) + if (cfg->smrs) return -EEXIST; - smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL); + smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); if (!smrs) { - dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n", - master->num_streamids, master->of_node->name); + dev_err(smmu->dev, "failed to allocate %d SMRs\n", + cfg->num_streamids); return -ENOMEM; } - /* Allocate the SMRs on the root SMMU */ - for (i = 0; i < master->num_streamids; ++i) { + /* Allocate the SMRs on the SMMU */ + for (i = 0; i < cfg->num_streamids; ++i) { int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, smmu->num_mapping_groups); if (IS_ERR_VALUE(idx)) { @@ -1085,18 +1080,18 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, smrs[i] = (struct arm_smmu_smr) { .idx = idx, .mask = 0, /* We don't currently share SMRs */ - .id = master->streamids[i], + .id = cfg->streamids[i], }; } /* It worked! 
Now, poke the actual hardware */ - for (i = 0; i < master->num_streamids; ++i) { + for (i = 0; i < cfg->num_streamids; ++i) { u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | smrs[i].mask << SMR_MASK_SHIFT; writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); } - master->smrs = smrs; + cfg->smrs = smrs; return 0; err_free_smrs: @@ -1107,68 +1102,55 @@ err_free_smrs: } static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, - struct arm_smmu_master *master) + struct arm_smmu_master_cfg *cfg) { int i; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - struct arm_smmu_smr *smrs = master->smrs; + struct arm_smmu_smr *smrs = cfg->smrs; /* Invalidate the SMRs before freeing back to the allocator */ - for (i = 0; i < master->num_streamids; ++i) { + for (i = 0; i < cfg->num_streamids; ++i) { u8 idx = smrs[i].idx; + writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); __arm_smmu_free_bitmap(smmu->smr_map, idx); } - master->smrs = NULL; + cfg->smrs = NULL; kfree(smrs); } static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, - struct arm_smmu_master *master) + struct arm_smmu_master_cfg *cfg) { int i; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - for (i = 0; i < master->num_streamids; ++i) { - u16 sid = master->streamids[i]; + for (i = 0; i < cfg->num_streamids; ++i) { + u16 sid = cfg->streamids[i]; + writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(sid)); } } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master *master) + struct arm_smmu_master_cfg *cfg) { int i, ret; - struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu; + struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - ret = arm_smmu_master_configure_smrs(smmu, master); + ret = arm_smmu_master_configure_smrs(smmu, cfg); if (ret) return ret; - /* Bypass the leaves */ - smmu = smmu_domain->leaf_smmu; - while ((parent = find_parent_smmu(smmu))) { - /* - * We won't have a StreamID match for anything but the root - * smmu, so we only need to worry about StreamID indexing, - * where we must install bypass entries in the S2CRs. - */ - if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) - continue; - - arm_smmu_bypass_stream_mapping(smmu, master); - smmu = parent; - } - - /* Now we're at the root, time to point at our context bank */ - for (i = 0; i < master->num_streamids; ++i) { + for (i = 0; i < cfg->num_streamids; ++i) { u32 idx, s2cr; - idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; + + idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; s2cr = S2CR_TYPE_TRANS | - (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); + (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); } @@ -1176,58 +1158,57 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, } static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master *master) + struct arm_smmu_master_cfg *cfg) { - struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu; + struct arm_smmu_device *smmu = smmu_domain->smmu; /* * We *must* clear the S2CR first, because freeing the SMR means * that it can be re-allocated immediately. 
*/ - arm_smmu_bypass_stream_mapping(smmu, master); - arm_smmu_master_free_smrs(smmu, master); + arm_smmu_bypass_stream_mapping(smmu, cfg); + arm_smmu_master_free_smrs(smmu, cfg); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = -EINVAL; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *device_smmu = dev->archdata.iommu; - struct arm_smmu_master *master; + struct arm_smmu_device *smmu; + struct arm_smmu_master_cfg *cfg; unsigned long flags; - if (!device_smmu) { + smmu = dev_get_master_dev(dev)->archdata.iommu; + if (!smmu) { dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); return -ENXIO; } /* - * Sanity check the domain. We don't currently support domains - * that cross between different SMMU chains. + * Sanity check the domain. We don't support domains across + * different SMMUs. */ spin_lock_irqsave(&smmu_domain->lock, flags); - if (!smmu_domain->leaf_smmu) { + if (!smmu_domain->smmu) { /* Now that we have a master, we can finalise the domain */ - ret = arm_smmu_init_domain_context(domain, dev); + ret = arm_smmu_init_domain_context(domain, smmu); if (IS_ERR_VALUE(ret)) goto err_unlock; - - smmu_domain->leaf_smmu = device_smmu; - } else if (smmu_domain->leaf_smmu != device_smmu) { + } else if (smmu_domain->smmu != smmu) { dev_err(dev, "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->leaf_smmu->dev), - dev_name(device_smmu->dev)); + dev_name(smmu_domain->smmu->dev), + dev_name(smmu->dev)); goto err_unlock; } spin_unlock_irqrestore(&smmu_domain->lock, flags); /* Looks ok, so add the device to the domain */ - master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); - if (!master) + cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); + if (!cfg) return -ENODEV; - return arm_smmu_domain_add_master(smmu_domain, master); + return arm_smmu_domain_add_master(smmu_domain, cfg); err_unlock: spin_unlock_irqrestore(&smmu_domain->lock, flags); @@ -1237,11 +1218,11 @@ err_unlock: static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) { struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_master *master; + struct arm_smmu_master_cfg *cfg; - master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); - if (master) - arm_smmu_domain_remove_master(smmu_domain, master); + cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); + if (cfg) + arm_smmu_domain_remove_master(smmu_domain, cfg); } static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, @@ -1261,6 +1242,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, if (pmd_none(*pmd)) { /* Allocate a new set of tables */ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); + if (!table) return -ENOMEM; @@ -1326,6 +1308,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, */ do { int i = 1; + pteval &= ~ARM_SMMU_PTE_CONT; if (arm_smmu_pte_is_contiguous_range(addr, end)) { @@ -1340,7 +1323,8 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); cont_start = pmd_page_vaddr(*pmd) + idx; for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) - pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT; + pte_val(*(cont_start + j)) &= + ~ARM_SMMU_PTE_CONT; arm_smmu_flush_pgtable(smmu, cont_start, sizeof(*pte) * @@ -1429,12 +1413,12 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, int ret, stage; unsigned long end; phys_addr_t input_mask, 
output_mask; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - pgd_t *pgd = root_cfg->pgd; - struct arm_smmu_device *smmu = root_cfg->smmu; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + pgd_t *pgd = cfg->pgd; unsigned long flags; - if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { + if (cfg->cbar == CBAR_TYPE_S2_TRANS) { stage = 2; output_mask = (1ULL << smmu->s2_output_size) - 1; } else { @@ -1484,10 +1468,6 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, if (!smmu_domain) return -ENODEV; - /* Check for silent address truncation up the SMMU chain. */ - if ((phys_addr_t)iova & ~smmu_domain->output_mask) - return -ERANGE; - return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); } @@ -1498,7 +1478,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, struct arm_smmu_domain *smmu_domain = domain->priv; ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); - arm_smmu_tlb_inv_context(&smmu_domain->root_cfg); + arm_smmu_tlb_inv_context(smmu_domain); return ret ? 0 : size; } @@ -1510,9 +1490,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, pmd_t pmd; pte_t pte; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - pgdp = root_cfg->pgd; + pgdp = cfg->pgd; if (!pgdp) return 0; @@ -1538,19 +1518,29 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, static int arm_smmu_domain_has_cap(struct iommu_domain *domain, unsigned long cap) { - unsigned long caps = 0; struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_device *smmu = smmu_domain->smmu; + u32 features = smmu ? smmu->features : 0; + + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return features & ARM_SMMU_FEAT_COHERENT_WALK; + case IOMMU_CAP_INTR_REMAP: + return 1; /* MSIs are just memory writes */ + default: + return 0; + } +} - if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) - caps |= IOMMU_CAP_CACHE_COHERENCY; - - return !!(cap & caps); +static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) +{ + *((u16 *)data) = alias; + return 0; /* Continue walking */ } static int arm_smmu_add_device(struct device *dev) { - struct arm_smmu_device *child, *parent, *smmu; - struct arm_smmu_master *master = NULL; + struct arm_smmu_device *smmu; struct iommu_group *group; int ret; @@ -1559,35 +1549,8 @@ static int arm_smmu_add_device(struct device *dev) return -EINVAL; } - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(parent, &arm_smmu_devices, list) { - smmu = parent; - - /* Try to find a child of the current SMMU. */ - list_for_each_entry(child, &arm_smmu_devices, list) { - if (child->parent_of_node == parent->dev->of_node) { - /* Does the child sit above our master? */ - master = find_smmu_master(child, dev->of_node); - if (master) { - smmu = NULL; - break; - } - } - } - - /* We found some children, so keep searching. 
*/ - if (!smmu) { - master = NULL; - continue; - } - - master = find_smmu_master(smmu, dev->of_node); - if (master) - break; - } - spin_unlock(&arm_smmu_devices_lock); - - if (!master) + smmu = find_smmu_for_device(dev); + if (!smmu) return -ENODEV; group = iommu_group_alloc(); @@ -1596,20 +1559,45 @@ static int arm_smmu_add_device(struct device *dev) return PTR_ERR(group); } + if (dev_is_pci(dev)) { + struct arm_smmu_master_cfg *cfg; + struct pci_dev *pdev = to_pci_dev(dev); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + ret = -ENOMEM; + goto out_put_group; + } + + cfg->num_streamids = 1; + /* + * Assume Stream ID == Requester ID for now. + * We need a way to describe the ID mappings in FDT. + */ + pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, + &cfg->streamids[0]); + dev->archdata.iommu = cfg; + } else { + dev->archdata.iommu = smmu; + } + ret = iommu_group_add_device(group, dev); - iommu_group_put(group); - dev->archdata.iommu = smmu; +out_put_group: + iommu_group_put(group); return ret; } static void arm_smmu_remove_device(struct device *dev) { + if (dev_is_pci(dev)) + kfree(dev->archdata.iommu); + dev->archdata.iommu = NULL; iommu_group_remove_device(dev); } -static struct iommu_ops arm_smmu_ops = { +static const struct iommu_ops arm_smmu_ops = { .domain_init = arm_smmu_domain_init, .domain_destroy = arm_smmu_domain_destroy, .attach_dev = arm_smmu_attach_dev, @@ -1639,7 +1627,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Mark all SMRn as invalid and all S2CRn as bypass */ for (i = 0; i < smmu->num_mapping_groups; ++i) { writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); - writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); + writel_relaxed(S2CR_TYPE_BYPASS, + gr0_base + ARM_SMMU_GR0_S2CR(i)); } /* Make sure all context banks are disabled and clear CB_FSR */ @@ -1779,11 +1768,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; /* Check for size mismatch of SMMU address space from mapped region */ - size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size = 1 << + (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); size *= (smmu->pagesize << 1); if (smmu->size != size) - dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs " - "from mapped region size (0x%lx)!\n", size, smmu->size); + dev_warn(smmu->dev, + "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", + size, smmu->size); smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; @@ -1804,14 +1795,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * allocation (PTRS_PER_PGD). 
*/ #ifdef CONFIG_64BIT - smmu->s1_output_size = min((unsigned long)VA_BITS, size); + smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); #else smmu->s1_output_size = min(32UL, size); #endif /* The stage-2 output mask is also applied for bypass */ size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); - smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size); + smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); if (smmu->version == 1) { smmu->input_size = 32; @@ -1835,7 +1826,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) dev_notice(smmu->dev, "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", - smmu->input_size, smmu->s1_output_size, smmu->s2_output_size); + smmu->input_size, smmu->s1_output_size, + smmu->s2_output_size); return 0; } @@ -1843,7 +1835,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) { struct resource *res; struct arm_smmu_device *smmu; - struct device_node *dev_node; struct device *dev = &pdev->dev; struct rb_node *node; struct of_phandle_args masterspec; @@ -1890,6 +1881,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) for (i = 0; i < num_irqs; ++i) { int irq = platform_get_irq(pdev, i); + if (irq < 0) { dev_err(dev, "failed to get irq index %d\n", i); return -ENODEV; @@ -1913,12 +1905,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } dev_notice(dev, "registered %d master devices\n", i); - if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0))) - smmu->parent_of_node = dev_node; - err = arm_smmu_device_cfg_probe(smmu); if (err) - goto out_put_parent; + goto out_put_masters; parse_driver_options(smmu); @@ -1928,7 +1917,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) "found only %d context interrupt(s) but %d required\n", smmu->num_context_irqs, smmu->num_context_banks); err = -ENODEV; - goto out_put_parent; + goto out_put_masters; } for (i = 0; i < smmu->num_global_irqs; ++i) { @@ -1956,14 +1945,10 @@ out_free_irqs: while (i--) free_irq(smmu->irqs[i], smmu); -out_put_parent: - if (smmu->parent_of_node) - of_node_put(smmu->parent_of_node); - out_put_masters: for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { - struct arm_smmu_master *master; - master = container_of(node, struct arm_smmu_master, node); + struct arm_smmu_master *master + = container_of(node, struct arm_smmu_master, node); of_node_put(master->of_node); } @@ -1990,12 +1975,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev) if (!smmu) return -ENODEV; - if (smmu->parent_of_node) - of_node_put(smmu->parent_of_node); - for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { - struct arm_smmu_master *master; - master = container_of(node, struct arm_smmu_master, node); + struct arm_smmu_master *master + = container_of(node, struct arm_smmu_master, node); of_node_put(master->of_node); } @@ -2006,7 +1988,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) free_irq(smmu->irqs[i], smmu); /* Turn the thing off */ - writel(sCR0_CLIENTPD,ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); return 0; } @@ -2048,6 +2030,11 @@ static int __init arm_smmu_init(void) bus_set_iommu(&amba_bustype, &arm_smmu_ops); #endif +#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); +#endif + return 0; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 9a4f05e5b23f..4306885f48b1 100644 --- 
a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -38,6 +38,7 @@ #include <linux/tboot.h> #include <linux/dmi.h> #include <linux/slab.h> +#include <linux/iommu.h> #include <asm/irq_remapping.h> #include <asm/iommu_table.h> @@ -980,6 +981,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); drhd->iommu = iommu; + + if (intel_iommu_enabled) + iommu->iommu_dev = iommu_device_create(NULL, iommu, + intel_iommu_groups, + iommu->name); + return 0; err_unmap: @@ -991,6 +998,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) static void free_iommu(struct intel_iommu *iommu) { + iommu_device_destroy(iommu->iommu_dev); + if (iommu->irq) { free_irq(iommu->irq, iommu); irq_set_handler_data(iommu->irq, NULL); @@ -1339,9 +1348,6 @@ int dmar_enable_qi(struct intel_iommu *iommu) return -ENOMEM; } - qi->free_head = qi->free_tail = 0; - qi->free_cnt = QI_LENGTH; - raw_spin_lock_init(&qi->q_lock); __dmar_enable_qi(iommu); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 99054d2c040d..d037e87a1fe5 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1170,7 +1170,7 @@ static void exynos_iommu_remove_device(struct device *dev) iommu_group_remove_device(dev); } -static struct iommu_ops exynos_iommu_ops = { +static const struct iommu_ops exynos_iommu_ops = { .domain_init = exynos_iommu_domain_init, .domain_destroy = exynos_iommu_domain_destroy, .attach_dev = exynos_iommu_attach_device, diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index b99dd88e31b9..2b6ce9387af1 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -92,7 +92,7 @@ struct gen_pool *spaace_pool; * subwindow count per liodn. * */ -u32 pamu_get_max_subwin_cnt() +u32 pamu_get_max_subwin_cnt(void) { return max_subwindow_count; } @@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn) static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) { /* Bug if not a power of 2 */ - BUG_ON(!is_power_of_2(addrspace_size)); + BUG_ON((addrspace_size & (addrspace_size - 1))); /* window size is 2^(WSE+1) bytes */ - return __ffs(addrspace_size) - 1; + return fls64(addrspace_size) - 2; } /* Derive the PAACE window count encoding for the subwindow count */ @@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, struct paace *ppaace; unsigned long fspi; - if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { + if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { pr_debug("window size too small or not a power of two %llx\n", win_size); return -EINVAL; } @@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, return -ENOENT; } - if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { + if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) { pr_debug("subwindow size out of range, or not a power of 2\n"); return -EINVAL; } diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 93072ba44b1d..61d1dafa242d 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -38,7 +38,6 @@ #include <sysdev/fsl_pci.h> #include "fsl_pamu_domain.h" -#include "pci.h" /* * Global spinlock that needs to be held while @@ -301,7 +300,7 @@ static int check_size(u64 size, dma_addr_t iova) * Size must be a power of two and at least be equal * to PAMU page size. 
*/ - if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { + if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { pr_debug("%s: size too small or not a power of two\n", __func__); return -EINVAL; } @@ -335,11 +334,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void) return domain; } -static inline struct device_domain_info *find_domain(struct device *dev) -{ - return dev->archdata.iommu_domain; -} - static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) { unsigned long flags; @@ -380,7 +374,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * Check here if the device is already attached to domain or not. * If the device is already attached to a domain detach it. */ - old_domain_info = find_domain(dev); + old_domain_info = dev->archdata.iommu_domain; if (old_domain_info && old_domain_info->domain != dma_domain) { spin_unlock_irqrestore(&device_domain_lock, flags); detach_device(dev, old_domain_info->domain); @@ -399,7 +393,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * the info for the first LIODN as all * LIODNs share the same domain */ - if (!old_domain_info) + if (!dev->archdata.iommu_domain) dev->archdata.iommu_domain = info; spin_unlock_irqrestore(&device_domain_lock, flags); @@ -892,8 +886,6 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, return ret; } -#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) - static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; @@ -950,75 +942,14 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev) struct pci_controller *pci_ctl; bool pci_endpt_partioning; struct iommu_group *group = NULL; - struct pci_dev *bridge, *dma_pdev = NULL; pci_ctl = pci_bus_to_host(pdev->bus); pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl); /* We can partition PCIe devices so assign device group to the device */ if (pci_endpt_partioning) { - bridge = pci_find_upstream_pcie_bridge(pdev); - if (bridge) { - if (pci_is_pcie(bridge)) - dma_pdev = pci_get_domain_bus_and_slot( - pci_domain_nr(pdev->bus), - bridge->subordinate->number, 0); - if (!dma_pdev) - dma_pdev = pci_dev_get(bridge); - } else - dma_pdev = pci_dev_get(pdev); - - /* Account for quirked devices */ - swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); - - /* - * If it's a multifunction device that does not support our - * required ACS flags, add to the same group as lowest numbered - * function that also does not suport the required ACS flags. - */ - if (dma_pdev->multifunction && - !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { - u8 i, slot = PCI_SLOT(dma_pdev->devfn); - - for (i = 0; i < 8; i++) { - struct pci_dev *tmp; - - tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); - if (!tmp) - continue; - - if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { - swap_pci_ref(&dma_pdev, tmp); - break; - } - pci_dev_put(tmp); - } - } + group = iommu_group_get_for_dev(&pdev->dev); /* - * Devices on the root bus go through the iommu. If that's not us, - * find the next upstream device and test ACS up to the root bus. - * Finding the next device may require skipping virtual buses. 
- */ - while (!pci_is_root_bus(dma_pdev->bus)) { - struct pci_bus *bus = dma_pdev->bus; - - while (!bus->self) { - if (!pci_is_root_bus(bus)) - bus = bus->parent; - else - goto root_bus; - } - - if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) - break; - - swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); - } - -root_bus: - group = get_device_iommu_group(&dma_pdev->dev); - pci_dev_put(dma_pdev); - /* * PCIe controller is not a paritionable entity * free the controller device iommu_group. */ @@ -1042,12 +973,15 @@ root_bus: group = get_shared_pci_device_group(pdev); } + if (!group) + group = ERR_PTR(-ENODEV); + return group; } static int fsl_pamu_add_device(struct device *dev) { - struct iommu_group *group = NULL; + struct iommu_group *group = ERR_PTR(-ENODEV); struct pci_dev *pdev; const u32 *prop; int ret, len; @@ -1070,7 +1004,7 @@ static int fsl_pamu_add_device(struct device *dev) group = get_device_iommu_group(dev); } - if (!group || IS_ERR(group)) + if (IS_ERR(group)) return PTR_ERR(group); ret = iommu_group_add_device(group, dev); @@ -1118,8 +1052,7 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, ((w_count > 1) ? w_count : 0)); if (!ret) { - if (dma_domain->win_arr) - kfree(dma_domain->win_arr); + kfree(dma_domain->win_arr); dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * w_count, GFP_ATOMIC); if (!dma_domain->win_arr) { @@ -1140,7 +1073,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain) return dma_domain->win_cnt; } -static struct iommu_ops fsl_pamu_ops = { +static const struct iommu_ops fsl_pamu_ops = { .domain_init = fsl_pamu_domain_init, .domain_destroy = fsl_pamu_domain_destroy, .attach_dev = fsl_pamu_attach_device, @@ -1157,7 +1090,7 @@ static struct iommu_ops fsl_pamu_ops = { .remove_device = fsl_pamu_remove_device, }; -int pamu_domain_init() +int pamu_domain_init(void) { int ret = 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 51b6b77dc3e5..d1f5caad04f9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -45,7 +45,6 @@ #include <asm/iommu.h> #include "irq_remapping.h" -#include "pci.h" #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE @@ -304,7 +303,7 @@ static inline bool dma_pte_present(struct dma_pte *pte) static inline bool dma_pte_superpage(struct dma_pte *pte) { - return (pte->val & (1 << 7)); + return (pte->val & DMA_PTE_LARGE_PAGE); } static inline int first_pte_in_page(struct dma_pte *pte) @@ -321,16 +320,13 @@ static inline int first_pte_in_page(struct dma_pte *pte) static struct dmar_domain *si_domain; static int hw_pass_through = 1; -/* devices under the same p2p bridge are owned in one domain */ -#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) - /* domain represents a virtual machine, more than one devices * across iommus may be owned in one domain, e.g. kvm guest. 
*/ -#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) +#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0) /* si_domain contains mulitple devices */ -#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) +#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) /* define the limit of IOMMUs supported in each domain */ #ifdef CONFIG_X86 @@ -429,6 +425,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, struct device *dev); static void iommu_detach_dependent_devices(struct intel_iommu *iommu, struct device *dev); +static int domain_detach_iommu(struct dmar_domain *domain, + struct intel_iommu *iommu); #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON int dmar_disabled = 0; @@ -451,7 +449,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); -static struct iommu_ops intel_iommu_ops; +static const struct iommu_ops intel_iommu_ops; static int __init intel_iommu_setup(char *str) { @@ -540,6 +538,24 @@ void free_iova_mem(struct iova *iova) kmem_cache_free(iommu_iova_cache, iova); } +static inline int domain_type_is_vm(struct dmar_domain *domain) +{ + return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; +} + +static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) +{ + return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | + DOMAIN_FLAG_STATIC_IDENTITY); +} + +static inline int domain_pfn_supported(struct dmar_domain *domain, + unsigned long pfn) +{ + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + + return !(addr_width < BITS_PER_LONG && pfn >> addr_width); +} static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { @@ -580,9 +596,7 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) int iommu_id; /* si_domain and vm domain should not get here. 
*/ - BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); - BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); - + BUG_ON(domain_type_is_vm_or_si(domain)); iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus); if (iommu_id < 0 || iommu_id >= g_num_of_iommus) return NULL; @@ -619,50 +633,56 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) rcu_read_unlock(); } -static void domain_update_iommu_snooping(struct dmar_domain *domain) +static int domain_update_iommu_snooping(struct intel_iommu *skip) { - int i; - - domain->iommu_snooping = 1; + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + int ret = 1; - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { - if (!ecap_sc_support(g_iommus[i]->ecap)) { - domain->iommu_snooping = 0; - break; + rcu_read_lock(); + for_each_active_iommu(iommu, drhd) { + if (iommu != skip) { + if (!ecap_sc_support(iommu->ecap)) { + ret = 0; + break; + } } } + rcu_read_unlock(); + + return ret; } -static void domain_update_iommu_superpage(struct dmar_domain *domain) +static int domain_update_iommu_superpage(struct intel_iommu *skip) { struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu = NULL; + struct intel_iommu *iommu; int mask = 0xf; if (!intel_iommu_superpage) { - domain->iommu_superpage = 0; - return; + return 0; } /* set iommu_superpage to the smallest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { - mask &= cap_super_page_val(iommu->cap); - if (!mask) { - break; + if (iommu != skip) { + mask &= cap_super_page_val(iommu->cap); + if (!mask) + break; } } rcu_read_unlock(); - domain->iommu_superpage = fls(mask); + return fls(mask); } /* Some capabilities may be different across iommus */ static void domain_update_iommu_cap(struct dmar_domain *domain) { domain_update_iommu_coherency(domain); - domain_update_iommu_snooping(domain); - domain_update_iommu_superpage(domain); + domain->iommu_snooping = domain_update_iommu_snooping(NULL); + domain->iommu_superpage = domain_update_iommu_superpage(NULL); } static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) @@ -671,7 +691,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf struct intel_iommu *iommu; struct device *tmp; struct pci_dev *ptmp, *pdev = NULL; - u16 segment; + u16 segment = 0; int i; if (dev_is_pci(dev)) { @@ -816,14 +836,13 @@ out: static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, unsigned long pfn, int *target_level) { - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); int offset; BUG_ON(!domain->pgd); - if (addr_width < BITS_PER_LONG && pfn >> addr_width) + if (!domain_pfn_supported(domain, pfn)) /* Address beyond IOMMU's addressing capabilities. */ return NULL; @@ -849,13 +868,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; - if (cmpxchg64(&pte->val, 0ULL, pteval)) { + if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. 
*/ free_pgtable_page(tmp_page); - } else { - dma_pte_addr(pte); + else domain_flush_cache(domain, pte, sizeof(*pte)); - } } if (level == 1) break; @@ -892,7 +909,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, break; } - if (pte->val & DMA_PTE_LARGE_PAGE) { + if (dma_pte_superpage(pte)) { *large_page = total; return pte; } @@ -908,12 +925,11 @@ static void dma_pte_clear_range(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; unsigned int large_page = 1; struct dma_pte *first_pte, *pte; - BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(!domain_pfn_supported(domain, start_pfn)); + BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ @@ -974,12 +990,12 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - - BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(!domain_pfn_supported(domain, start_pfn)); + BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); + dma_pte_clear_range(domain, start_pfn, last_pfn); + /* We don't need lock here; nobody else touches the iova range */ dma_pte_free_level(domain, agaw_to_level(domain->agaw), domain->pgd, 0, start_pfn, last_pfn); @@ -1077,11 +1093,10 @@ struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct page *freelist = NULL; - BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(!domain_pfn_supported(domain, start_pfn)); + BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ @@ -1275,7 +1290,8 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry(info, &domain->devices, link) - if (info->bus == bus && info->devfn == devfn) { + if (info->iommu == iommu && info->bus == bus && + info->devfn == devfn) { found = 1; break; } @@ -1384,7 +1400,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) raw_spin_unlock_irqrestore(&iommu->register_lock, flags); } -static int iommu_enable_translation(struct intel_iommu *iommu) +static void iommu_enable_translation(struct intel_iommu *iommu) { u32 sts; unsigned long flags; @@ -1398,10 +1414,9 @@ static int iommu_enable_translation(struct intel_iommu *iommu) readl, (sts & DMA_GSTS_TES), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flags); - return 0; } -static int iommu_disable_translation(struct intel_iommu *iommu) +static void iommu_disable_translation(struct intel_iommu *iommu) { u32 sts; unsigned long flag; @@ -1415,7 +1430,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu) readl, (!(sts & DMA_GSTS_TES)), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); - return 0; } @@ -1462,8 +1476,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) static void free_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; - int i, 
count; - unsigned long flags; + int i; if ((iommu->domains) && (iommu->domain_ids)) { for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { @@ -1476,11 +1489,8 @@ static void free_dmar_iommu(struct intel_iommu *iommu) domain = iommu->domains[i]; clear_bit(i, iommu->domain_ids); - - spin_lock_irqsave(&domain->iommu_lock, flags); - count = --domain->iommu_count; - spin_unlock_irqrestore(&domain->iommu_lock, flags); - if (count == 0) + if (domain_detach_iommu(domain, iommu) == 0 && + !domain_type_is_vm(domain)) domain_exit(domain); } } @@ -1499,7 +1509,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu) free_context_table(iommu); } -static struct dmar_domain *alloc_domain(bool vm) +static struct dmar_domain *alloc_domain(int flags) { /* domain id for virtual machine, it won't be set in context */ static atomic_t vm_domid = ATOMIC_INIT(0); @@ -1509,46 +1519,62 @@ static struct dmar_domain *alloc_domain(bool vm) if (!domain) return NULL; + memset(domain, 0, sizeof(*domain)); domain->nid = -1; - domain->iommu_count = 0; - memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); - domain->flags = 0; + domain->flags = flags; spin_lock_init(&domain->iommu_lock); INIT_LIST_HEAD(&domain->devices); - if (vm) { + if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE) domain->id = atomic_inc_return(&vm_domid); - domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; - } return domain; } -static int iommu_attach_domain(struct dmar_domain *domain, - struct intel_iommu *iommu) +static int __iommu_attach_domain(struct dmar_domain *domain, + struct intel_iommu *iommu) { int num; unsigned long ndomains; - unsigned long flags; ndomains = cap_ndoms(iommu->cap); - - spin_lock_irqsave(&iommu->lock, flags); - num = find_first_zero_bit(iommu->domain_ids, ndomains); - if (num >= ndomains) { - spin_unlock_irqrestore(&iommu->lock, flags); - printk(KERN_ERR "IOMMU: no free domain ids\n"); - return -ENOMEM; + if (num < ndomains) { + set_bit(num, iommu->domain_ids); + iommu->domains[num] = domain; + } else { + num = -ENOSPC; } - domain->id = num; - domain->iommu_count++; - set_bit(num, iommu->domain_ids); - set_bit(iommu->seq_id, domain->iommu_bmp); - iommu->domains[num] = domain; + return num; +} + +static int iommu_attach_domain(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + int num; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + num = __iommu_attach_domain(domain, iommu); spin_unlock_irqrestore(&iommu->lock, flags); + if (num < 0) + pr_err("IOMMU: no free domain ids\n"); - return 0; + return num; +} + +static int iommu_attach_vm_domain(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + int num; + unsigned long ndomains; + + ndomains = cap_ndoms(iommu->cap); + for_each_set_bit(num, iommu->domain_ids, ndomains) + if (iommu->domains[num] == domain) + return num; + + return __iommu_attach_domain(domain, iommu); } static void iommu_detach_domain(struct dmar_domain *domain, @@ -1558,17 +1584,53 @@ static void iommu_detach_domain(struct dmar_domain *domain, int num, ndomains; spin_lock_irqsave(&iommu->lock, flags); - ndomains = cap_ndoms(iommu->cap); - for_each_set_bit(num, iommu->domain_ids, ndomains) { - if (iommu->domains[num] == domain) { - clear_bit(num, iommu->domain_ids); - iommu->domains[num] = NULL; - break; + if (domain_type_is_vm_or_si(domain)) { + ndomains = cap_ndoms(iommu->cap); + for_each_set_bit(num, iommu->domain_ids, ndomains) { + if (iommu->domains[num] == domain) { + clear_bit(num, iommu->domain_ids); + iommu->domains[num] = NULL; + break; + } } + } else { + 
clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; } spin_unlock_irqrestore(&iommu->lock, flags); } +static void domain_attach_iommu(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + unsigned long flags; + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { + domain->iommu_count++; + if (domain->iommu_count == 1) + domain->nid = iommu->node; + domain_update_iommu_cap(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); +} + +static int domain_detach_iommu(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + unsigned long flags; + int count = INT_MAX; + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) { + count = --domain->iommu_count; + domain_update_iommu_cap(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); + + return count; +} + static struct iova_domain reserved_iova_list; static struct lock_class_key reserved_rbtree_key; @@ -1706,9 +1768,7 @@ static void domain_exit(struct dmar_domain *domain) /* clear attached or cached domains */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || - test_bit(iommu->seq_id, domain->iommu_bmp)) - iommu_detach_domain(domain, iommu); + iommu_detach_domain(domain, iommu); rcu_read_unlock(); dma_free_pagelist(freelist); @@ -1723,8 +1783,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, struct context_entry *context; unsigned long flags; struct dma_pte *pgd; - unsigned long num; - unsigned long ndomains; int id; int agaw; struct device_domain_info *info = NULL; @@ -1748,31 +1806,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain, id = domain->id; pgd = domain->pgd; - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || - domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { - int found = 0; - - /* find an available domain id for this device in iommu */ - ndomains = cap_ndoms(iommu->cap); - for_each_set_bit(num, iommu->domain_ids, ndomains) { - if (iommu->domains[num] == domain) { - id = num; - found = 1; - break; - } - } - - if (found == 0) { - num = find_first_zero_bit(iommu->domain_ids, ndomains); - if (num >= ndomains) { + if (domain_type_is_vm_or_si(domain)) { + if (domain_type_is_vm(domain)) { + id = iommu_attach_vm_domain(domain, iommu); + if (id < 0) { spin_unlock_irqrestore(&iommu->lock, flags); - printk(KERN_ERR "IOMMU: no free domain ids\n"); + pr_err("IOMMU: no free domain ids\n"); return -EFAULT; } - - set_bit(num, iommu->domain_ids); - iommu->domains[num] = domain; - id = num; } /* Skip top levels of page tables for @@ -1824,72 +1865,68 @@ static int domain_context_mapping_one(struct dmar_domain *domain, (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); - iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); + iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH); } else { iommu_flush_write_buffer(iommu); } iommu_enable_dev_iotlb(info); spin_unlock_irqrestore(&iommu->lock, flags); - spin_lock_irqsave(&domain->iommu_lock, flags); - if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { - domain->iommu_count++; - if (domain->iommu_count == 1) - domain->nid = iommu->node; - domain_update_iommu_cap(domain); - } - spin_unlock_irqrestore(&domain->iommu_lock, flags); + domain_attach_iommu(domain, iommu); + return 0; } +struct domain_context_mapping_data { + struct dmar_domain *domain; + struct intel_iommu *iommu; + int 
translation; +}; + +static int domain_context_mapping_cb(struct pci_dev *pdev, + u16 alias, void *opaque) +{ + struct domain_context_mapping_data *data = opaque; + + return domain_context_mapping_one(data->domain, data->iommu, + PCI_BUS_NUM(alias), alias & 0xff, + data->translation); +} + static int domain_context_mapping(struct dmar_domain *domain, struct device *dev, int translation) { - int ret; - struct pci_dev *pdev, *tmp, *parent; struct intel_iommu *iommu; u8 bus, devfn; + struct domain_context_mapping_data data; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; - ret = domain_context_mapping_one(domain, iommu, bus, devfn, - translation); - if (ret || !dev_is_pci(dev)) - return ret; - - /* dependent device mapping */ - pdev = to_pci_dev(dev); - tmp = pci_find_upstream_pcie_bridge(pdev); - if (!tmp) - return 0; - /* Secondary interface's bus number and devfn 0 */ - parent = pdev->bus->self; - while (parent != tmp) { - ret = domain_context_mapping_one(domain, iommu, - parent->bus->number, - parent->devfn, translation); - if (ret) - return ret; - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ - return domain_context_mapping_one(domain, iommu, - tmp->subordinate->number, 0, - translation); - else /* this is a legacy PCI bridge */ - return domain_context_mapping_one(domain, iommu, - tmp->bus->number, - tmp->devfn, + if (!dev_is_pci(dev)) + return domain_context_mapping_one(domain, iommu, bus, devfn, translation); + + data.domain = domain; + data.iommu = iommu; + data.translation = translation; + + return pci_for_each_dma_alias(to_pci_dev(dev), + &domain_context_mapping_cb, &data); +} + +static int domain_context_mapped_cb(struct pci_dev *pdev, + u16 alias, void *opaque) +{ + struct intel_iommu *iommu = opaque; + + return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); } static int domain_context_mapped(struct device *dev) { - int ret; - struct pci_dev *pdev, *tmp, *parent; struct intel_iommu *iommu; u8 bus, devfn; @@ -1897,30 +1934,11 @@ static int domain_context_mapped(struct device *dev) if (!iommu) return -ENODEV; - ret = device_context_mapped(iommu, bus, devfn); - if (!ret || !dev_is_pci(dev)) - return ret; + if (!dev_is_pci(dev)) + return device_context_mapped(iommu, bus, devfn); - /* dependent device mapping */ - pdev = to_pci_dev(dev); - tmp = pci_find_upstream_pcie_bridge(pdev); - if (!tmp) - return ret; - /* Secondary interface's bus number and devfn 0 */ - parent = pdev->bus->self; - while (parent != tmp) { - ret = device_context_mapped(iommu, parent->bus->number, - parent->devfn); - if (!ret) - return ret; - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) - return device_context_mapped(iommu, tmp->subordinate->number, - 0); - else - return device_context_mapped(iommu, tmp->bus->number, - tmp->devfn); + return !pci_for_each_dma_alias(to_pci_dev(dev), + domain_context_mapped_cb, iommu); } /* Returns a number of VTD pages, but aligned to MM page size */ @@ -1965,12 +1983,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, { struct dma_pte *first_pte = NULL, *pte = NULL; phys_addr_t uninitialized_var(pteval); - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; unsigned long sg_res; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; - BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); + BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; 
@@ -2004,12 +2021,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, /* It is large page*/ if (largepage_lvl > 1) { pteval |= DMA_PTE_LARGE_PAGE; - /* Ensure that old small page tables are removed to make room - for superpage, if they exist. */ - dma_pte_clear_range(domain, iov_pfn, - iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + lvl_pages = lvl_to_nr_pages(largepage_lvl); + /* + * Ensure that old small page tables are + * removed to make room for superpage, + * if they exist. + */ dma_pte_free_pagetable(domain, iov_pfn, - iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + iov_pfn + lvl_pages - 1); } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; } @@ -2102,31 +2121,20 @@ static inline void unlink_domain_info(struct device_domain_info *info) static void domain_remove_dev_info(struct dmar_domain *domain) { - struct device_domain_info *info; - unsigned long flags, flags2; + struct device_domain_info *info, *tmp; + unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); - while (!list_empty(&domain->devices)) { - info = list_entry(domain->devices.next, - struct device_domain_info, link); + list_for_each_entry_safe(info, tmp, &domain->devices, link) { unlink_domain_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); iommu_disable_dev_iotlb(info); iommu_detach_dev(info->iommu, info->bus, info->devfn); - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { + if (domain_type_is_vm(domain)) { iommu_detach_dependent_devices(info->iommu, info->dev); - /* clear this iommu in iommu_bmp, update iommu count - * and capabilities - */ - spin_lock_irqsave(&domain->iommu_lock, flags2); - if (test_and_clear_bit(info->iommu->seq_id, - domain->iommu_bmp)) { - domain->iommu_count--; - domain_update_iommu_cap(domain); - } - spin_unlock_irqrestore(&domain->iommu_lock, flags2); + domain_detach_iommu(domain, info->iommu); } free_devinfo_mem(info); @@ -2181,8 +2189,6 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, info->dev = dev; info->domain = domain; info->iommu = iommu; - if (!dev) - domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; spin_lock_irqsave(&device_domain_lock, flags); if (dev) @@ -2209,79 +2215,86 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, return domain; } +static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) +{ + *(u16 *)opaque = alias; + return 0; +} + /* domain is initialized */ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) { - struct dmar_domain *domain, *free = NULL; - struct intel_iommu *iommu = NULL; + struct dmar_domain *domain, *tmp; + struct intel_iommu *iommu; struct device_domain_info *info; - struct pci_dev *dev_tmp = NULL; + u16 dma_alias; unsigned long flags; - u8 bus, devfn, bridge_bus, bridge_devfn; + u8 bus, devfn; domain = find_domain(dev); if (domain) return domain; + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return NULL; + if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); - u16 segment; - segment = pci_domain_nr(pdev->bus); - dev_tmp = pci_find_upstream_pcie_bridge(pdev); - if (dev_tmp) { - if (pci_is_pcie(dev_tmp)) { - bridge_bus = dev_tmp->subordinate->number; - bridge_devfn = 0; - } else { - bridge_bus = dev_tmp->bus->number; - bridge_devfn = dev_tmp->devfn; - } - spin_lock_irqsave(&device_domain_lock, flags); - info = dmar_search_domain_by_dev_info(segment, - bridge_bus, - bridge_devfn); - if (info) { - iommu = info->iommu; - domain = info->domain; - } - 
spin_unlock_irqrestore(&device_domain_lock, flags); - /* pcie-pci bridge already has a domain, uses it */ - if (info) - goto found_domain; + pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); + + spin_lock_irqsave(&device_domain_lock, flags); + info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus), + PCI_BUS_NUM(dma_alias), + dma_alias & 0xff); + if (info) { + iommu = info->iommu; + domain = info->domain; } - } + spin_unlock_irqrestore(&device_domain_lock, flags); - iommu = device_to_iommu(dev, &bus, &devfn); - if (!iommu) - goto error; + /* DMA alias already has a domain, uses it */ + if (info) + goto found_domain; + } /* Allocate and initialize new domain for the device */ - domain = alloc_domain(false); + domain = alloc_domain(0); if (!domain) - goto error; - if (iommu_attach_domain(domain, iommu)) { + return NULL; + domain->id = iommu_attach_domain(domain, iommu); + if (domain->id < 0) { free_domain_mem(domain); - domain = NULL; - goto error; + return NULL; } - free = domain; - if (domain_init(domain, gaw)) - goto error; + domain_attach_iommu(domain, iommu); + if (domain_init(domain, gaw)) { + domain_exit(domain); + return NULL; + } + + /* register PCI DMA alias device */ + if (dev_is_pci(dev)) { + tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias), + dma_alias & 0xff, NULL, domain); + + if (!tmp || tmp != domain) { + domain_exit(domain); + domain = tmp; + } - /* register pcie-to-pci device */ - if (dev_tmp) { - domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn, - NULL, domain); if (!domain) - goto error; + return NULL; } found_domain: - domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); -error: - if (free != domain) - domain_exit(free); + tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); + + if (!tmp || tmp != domain) { + domain_exit(domain); + domain = tmp; + } return domain; } @@ -2405,6 +2418,7 @@ static inline void iommu_prepare_isa(void) printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " "floppy might not work\n"); + pci_dev_put(pdev); } #else static inline void iommu_prepare_isa(void) @@ -2420,19 +2434,25 @@ static int __init si_domain_init(int hw) struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; int nid, ret = 0; + bool first = true; - si_domain = alloc_domain(false); + si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY); if (!si_domain) return -EFAULT; - si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; - for_each_active_iommu(iommu, drhd) { ret = iommu_attach_domain(si_domain, iommu); - if (ret) { + if (ret < 0) { + domain_exit(si_domain); + return -EFAULT; + } else if (first) { + si_domain->id = ret; + first = false; + } else if (si_domain->id != ret) { domain_exit(si_domain); return -EFAULT; } + domain_attach_iommu(si_domain, iommu); } if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { @@ -2523,22 +2543,46 @@ static bool device_has_rmrr(struct device *dev) return false; } +/* + * There are a couple cases where we need to restrict the functionality of + * devices associated with RMRRs. The first is when evaluating a device for + * identity mapping because problems exist when devices are moved in and out + * of domains and their respective RMRR information is lost. This means that + * a device with associated RMRRs will never be in a "passthrough" domain. + * The second is use of the device through the IOMMU API. This interface + * expects to have full control of the IOVA space for the device. 
We cannot + * satisfy both the requirement that RMRR access is maintained and have an + * unencumbered IOVA space. We also have no ability to quiesce the device's + * use of the RMRR space or even inform the IOMMU API user of the restriction. + * We therefore prevent devices associated with an RMRR from participating in + * the IOMMU API, which eliminates them from device assignment. + * + * In both cases we assume that PCI USB devices with RMRRs have them largely + * for historical reasons and that the RMRR space is not actively used post + * boot. This exclusion may change if vendors begin to abuse it. + */ +static bool device_is_rmrr_locked(struct device *dev) +{ + if (!device_has_rmrr(dev)) + return false; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) + return false; + } + + return true; +} + static int iommu_should_identity_map(struct device *dev, int startup) { if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); - /* - * We want to prevent any device associated with an RMRR from - * getting placed into the SI Domain. This is done because - * problems exist when devices are moved in and out of domains - * and their respective RMRR info is lost. We exempt USB devices - * from this process due to their usage of RMRRs that are known - * to not be needed after BIOS hand-off to OS. - */ - if (device_has_rmrr(dev) && - (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) + if (device_is_rmrr_locked(dev)) return 0; if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) @@ -2850,11 +2894,7 @@ static int __init init_dmars(void) iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - - ret = iommu_enable_translation(iommu); - if (ret) - goto free_iommu; - + iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } @@ -3091,10 +3131,10 @@ static void flush_unmaps(void) /* On real hardware multiple invalidations are expensive */ if (cap_caching_mode(iommu->cap)) iommu_flush_iotlb_psi(iommu, domain->id, - iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, + iova->pfn_lo, iova_size(iova), !deferred_flush[i].freelist[j], 0); else { - mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); + mask = ilog2(mm_to_dma_pfn(iova_size(iova))); iommu_flush_dev_iotlb(deferred_flush[i].domain[j], (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); } @@ -3144,9 +3184,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +static void intel_unmap(struct device *dev, dma_addr_t dev_addr) { struct dmar_domain *domain; unsigned long start_pfn, last_pfn; @@ -3190,6 +3228,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, } } +static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + intel_unmap(dev, dev_addr); +} + static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) @@ -3246,7 +3291,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, size = PAGE_ALIGN(size); order = get_order(size); - intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); + intel_unmap(dev, dma_handle); if 
(!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) __free_pages(page, order); } @@ -3255,43 +3300,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - struct dmar_domain *domain; - unsigned long start_pfn, last_pfn; - struct iova *iova; - struct intel_iommu *iommu; - struct page *freelist; - - if (iommu_no_mapping(dev)) - return; - - domain = find_domain(dev); - BUG_ON(!domain); - - iommu = domain_get_iommu(domain); - - iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); - if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", - (unsigned long long)sglist[0].dma_address)) - return; - - start_pfn = mm_to_dma_pfn(iova->pfn_lo); - last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; - - freelist = domain_unmap(domain, start_pfn, last_pfn); - - if (intel_iommu_strict) { - iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, - last_pfn - start_pfn + 1, !freelist, 0); - /* free iova */ - __free_iova(&domain->iovad, iova); - dma_free_pagelist(freelist); - } else { - add_unmap(domain, iova, freelist); - /* - * queue up the release of the unmap to save the 1/6th of the - * cpu used up by the iotlb flush operation... - */ - } + intel_unmap(dev, sglist[0].dma_address); } static int intel_nontranslate_map_sg(struct device *hddev, @@ -3355,13 +3364,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); if (unlikely(ret)) { - /* clear the page */ - dma_pte_clear_range(domain, start_vpfn, - start_vpfn + size - 1); - /* free page tables */ dma_pte_free_pagetable(domain, start_vpfn, start_vpfn + size - 1); - /* free iova */ __free_iova(&domain->iovad, iova); return 0; } @@ -3568,10 +3572,8 @@ static int init_iommu_hw(void) iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, - DMA_TLB_GLOBAL_FLUSH); - if (iommu_enable_translation(iommu)) - return 1; + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } @@ -3873,9 +3875,7 @@ static int device_notifier(struct notifier_block *nb, down_read(&dmar_global_lock); domain_remove_one_dev_info(domain, dev); - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && - list_empty(&domain->devices)) + if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) domain_exit(domain); up_read(&dmar_global_lock); @@ -3935,8 +3935,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb, rcu_read_lock(); for_each_active_iommu(iommu, drhd) iommu_flush_iotlb_psi(iommu, si_domain->id, - iova->pfn_lo, - iova->pfn_hi - iova->pfn_lo + 1, + iova->pfn_lo, iova_size(iova), !freelist, 0); rcu_read_unlock(); dma_free_pagelist(freelist); @@ -3955,6 +3954,63 @@ static struct notifier_block intel_iommu_memory_nb = { .priority = 0 }; + +static ssize_t intel_iommu_show_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct intel_iommu *iommu = dev_get_drvdata(dev); + u32 ver = readl(iommu->reg + DMAR_VER_REG); + return sprintf(buf, "%d:%d\n", + DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver)); +} +static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL); + +static ssize_t intel_iommu_show_address(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct intel_iommu *iommu = dev_get_drvdata(dev); + 
return sprintf(buf, "%llx\n", iommu->reg_phys); +} +static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL); + +static ssize_t intel_iommu_show_cap(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct intel_iommu *iommu = dev_get_drvdata(dev); + return sprintf(buf, "%llx\n", iommu->cap); +} +static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL); + +static ssize_t intel_iommu_show_ecap(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct intel_iommu *iommu = dev_get_drvdata(dev); + return sprintf(buf, "%llx\n", iommu->ecap); +} +static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL); + +static struct attribute *intel_iommu_attrs[] = { + &dev_attr_version.attr, + &dev_attr_address.attr, + &dev_attr_cap.attr, + &dev_attr_ecap.attr, + NULL, +}; + +static struct attribute_group intel_iommu_group = { + .name = "intel-iommu", + .attrs = intel_iommu_attrs, +}; + +const struct attribute_group *intel_iommu_groups[] = { + &intel_iommu_group, + NULL, +}; + int __init intel_iommu_init(void) { int ret = -ENODEV; @@ -4026,6 +4082,11 @@ int __init intel_iommu_init(void) init_iommu_pm_ops(); + for_each_active_iommu(iommu, drhd) + iommu->iommu_dev = iommu_device_create(NULL, iommu, + intel_iommu_groups, + iommu->name); + bus_set_iommu(&pci_bus_type, &intel_iommu_ops); bus_register_notifier(&pci_bus_type, &device_nb); if (si_domain && !hw_pass_through) @@ -4044,33 +4105,27 @@ out_free_dmar: return ret; } +static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct intel_iommu *iommu = opaque; + + iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff); + return 0; +} + +/* + * NB - intel-iommu lacks any sort of reference counting for the users of + * dependent devices. If multiple endpoints have intersecting dependent + * devices, unbinding the driver from any one of them will possibly leave + * the others unable to operate. 
+ */ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, struct device *dev) { - struct pci_dev *tmp, *parent, *pdev; - if (!iommu || !dev || !dev_is_pci(dev)) return; - pdev = to_pci_dev(dev); - - /* dependent device detach */ - tmp = pci_find_upstream_pcie_bridge(pdev); - /* Secondary interface's bus number and devfn 0 */ - if (tmp) { - parent = pdev->bus->self; - while (parent != tmp) { - iommu_detach_dev(iommu, parent->bus->number, - parent->devfn); - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ - iommu_detach_dev(iommu, - tmp->subordinate->number, 0); - else /* this is a legacy PCI bridge */ - iommu_detach_dev(iommu, tmp->bus->number, - tmp->devfn); - } + pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu); } static void domain_remove_one_dev_info(struct dmar_domain *domain, @@ -4117,20 +4172,9 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, spin_unlock_irqrestore(&device_domain_lock, flags); if (found == 0) { - unsigned long tmp_flags; - spin_lock_irqsave(&domain->iommu_lock, tmp_flags); - clear_bit(iommu->seq_id, domain->iommu_bmp); - domain->iommu_count--; - domain_update_iommu_cap(domain); - spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); - - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { - spin_lock_irqsave(&iommu->lock, tmp_flags); - clear_bit(domain->id, iommu->domain_ids); - iommu->domains[domain->id] = NULL; - spin_unlock_irqrestore(&iommu->lock, tmp_flags); - } + domain_detach_iommu(domain, iommu); + if (!domain_type_is_vm_or_si(domain)) + iommu_detach_domain(domain, iommu); } } @@ -4150,7 +4194,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) domain->iommu_snooping = 0; domain->iommu_superpage = 0; domain->max_addr = 0; - domain->nid = -1; /* always allocate the top pgd */ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); @@ -4164,7 +4207,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) { struct dmar_domain *dmar_domain; - dmar_domain = alloc_domain(true); + dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); if (!dmar_domain) { printk(KERN_ERR "intel_iommu_domain_init: dmar_domain == NULL\n"); @@ -4202,14 +4245,18 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, int addr_width; u8 bus, devfn; + if (device_is_rmrr_locked(dev)) { + dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. 
Contact your platform vendor.\n"); + return -EPERM; + } + /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { struct dmar_domain *old_domain; old_domain = find_domain(dev); if (old_domain) { - if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || - dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) + if (domain_type_is_vm_or_si(dmar_domain)) domain_remove_one_dev_info(old_domain, dev); else domain_remove_dev_info(old_domain); @@ -4373,99 +4420,42 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain, return 0; } -#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) - static int intel_iommu_add_device(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev *bridge, *dma_pdev = NULL; + struct intel_iommu *iommu; struct iommu_group *group; - int ret; u8 bus, devfn; - if (!device_to_iommu(dev, &bus, &devfn)) + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) return -ENODEV; - bridge = pci_find_upstream_pcie_bridge(pdev); - if (bridge) { - if (pci_is_pcie(bridge)) - dma_pdev = pci_get_domain_bus_and_slot( - pci_domain_nr(pdev->bus), - bridge->subordinate->number, 0); - if (!dma_pdev) - dma_pdev = pci_dev_get(bridge); - } else - dma_pdev = pci_dev_get(pdev); - - /* Account for quirked devices */ - swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + iommu_device_link(iommu->iommu_dev, dev); - /* - * If it's a multifunction device that does not support our - * required ACS flags, add to the same group as lowest numbered - * function that also does not suport the required ACS flags. - */ - if (dma_pdev->multifunction && - !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { - u8 i, slot = PCI_SLOT(dma_pdev->devfn); - - for (i = 0; i < 8; i++) { - struct pci_dev *tmp; + group = iommu_group_get_for_dev(dev); - tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); - if (!tmp) - continue; - - if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { - swap_pci_ref(&dma_pdev, tmp); - break; - } - pci_dev_put(tmp); - } - } - - /* - * Devices on the root bus go through the iommu. If that's not us, - * find the next upstream device and test ACS up to the root bus. - * Finding the next device may require skipping virtual buses. 
- */ - while (!pci_is_root_bus(dma_pdev->bus)) { - struct pci_bus *bus = dma_pdev->bus; - - while (!bus->self) { - if (!pci_is_root_bus(bus)) - bus = bus->parent; - else - goto root_bus; - } - - if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) - break; - - swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); - } - -root_bus: - group = iommu_group_get(&dma_pdev->dev); - pci_dev_put(dma_pdev); - if (!group) { - group = iommu_group_alloc(); - if (IS_ERR(group)) - return PTR_ERR(group); - } - - ret = iommu_group_add_device(group, dev); + if (IS_ERR(group)) + return PTR_ERR(group); iommu_group_put(group); - return ret; + return 0; } static void intel_iommu_remove_device(struct device *dev) { + struct intel_iommu *iommu; + u8 bus, devfn; + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return; + iommu_group_remove_device(dev); + + iommu_device_unlink(iommu->iommu_dev, dev); } -static struct iommu_ops intel_iommu_ops = { +static const struct iommu_ops intel_iommu_ops = { .domain_init = intel_iommu_domain_init, .domain_destroy = intel_iommu_domain_destroy, .attach_dev = intel_iommu_attach_device, diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 9b174893f0f5..0df41f6264f5 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -70,6 +70,11 @@ static int get_irte(int irq, struct irte *entry) raw_spin_lock_irqsave(&irq_2_ir_lock, flags); + if (unlikely(!irq_iommu->iommu)) { + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); + return -1; + } + index = irq_iommu->irte_index + irq_iommu->sub_handle; *entry = *(irq_iommu->iommu->ir_table->base + index); @@ -369,29 +374,52 @@ static int set_hpet_sid(struct irte *irte, u8 id) return 0; } +struct set_msi_sid_data { + struct pci_dev *pdev; + u16 alias; +}; + +static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct set_msi_sid_data *data = opaque; + + data->pdev = pdev; + data->alias = alias; + + return 0; +} + static int set_msi_sid(struct irte *irte, struct pci_dev *dev) { - struct pci_dev *bridge; + struct set_msi_sid_data data; if (!irte || !dev) return -1; - /* PCIe device or Root Complex integrated PCI device */ - if (pci_is_pcie(dev) || !dev->bus->parent) { - set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, - (dev->bus->number << 8) | dev->devfn); - return 0; - } + pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); - bridge = pci_find_upstream_pcie_bridge(dev); - if (bridge) { - if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ - set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, - (bridge->bus->number << 8) | dev->bus->number); - else /* this is a legacy PCI bridge */ - set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, - (bridge->bus->number << 8) | bridge->devfn); - } + /* + * DMA alias provides us with a PCI device and alias. The only case + * where the it will return an alias on a different bus than the + * device is the case of a PCIe-to-PCI bridge, where the alias is for + * the subordinate bus. In this case we can only verify the bus. + * + * If the alias device is on a different bus than our source device + * then we have a topology based alias, use it. + * + * Otherwise, the alias is for a device DMA quirk and we cannot + * assume that MSI uses the same requester ID. Therefore use the + * original device. 
+ */ + if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number) + set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, + PCI_DEVID(PCI_BUS_NUM(data.alias), + dev->bus->number)); + else if (data.pdev->bus->number != dev->bus->number) + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias); + else + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, + PCI_DEVID(dev->bus->number, dev->devfn)); return 0; } diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c new file mode 100644 index 000000000000..39b2d9127dbf --- /dev/null +++ b/drivers/iommu/iommu-sysfs.c @@ -0,0 +1,134 @@ +/* + * IOMMU sysfs class support + * + * Copyright (C) 2014 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/device.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/slab.h> + +/* + * We provide a common class "devices" group which initially has no attributes. + * As devices are added to the IOMMU, we'll add links to the group. + */ +static struct attribute *devices_attr[] = { + NULL, +}; + +static const struct attribute_group iommu_devices_attr_group = { + .name = "devices", + .attrs = devices_attr, +}; + +static const struct attribute_group *iommu_dev_groups[] = { + &iommu_devices_attr_group, + NULL, +}; + +static void iommu_release_device(struct device *dev) +{ + kfree(dev); +} + +static struct class iommu_class = { + .name = "iommu", + .dev_release = iommu_release_device, + .dev_groups = iommu_dev_groups, +}; + +static int __init iommu_dev_init(void) +{ + return class_register(&iommu_class); +} +postcore_initcall(iommu_dev_init); + +/* + * Create an IOMMU device and return a pointer to it. IOMMU specific + * attributes can be provided as an attribute group, allowing a unique + * namespace per IOMMU type. + */ +struct device *iommu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) +{ + struct device *dev; + va_list vargs; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + device_initialize(dev); + + dev->class = &iommu_class; + dev->parent = parent; + dev->groups = groups; + dev_set_drvdata(dev, drvdata); + + va_start(vargs, fmt); + ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs); + va_end(vargs); + if (ret) + goto error; + + ret = device_add(dev); + if (ret) + goto error; + + return dev; + +error: + put_device(dev); + return ERR_PTR(ret); +} + +void iommu_device_destroy(struct device *dev) +{ + if (!dev || IS_ERR(dev)) + return; + + device_unregister(dev); +} + +/* + * IOMMU drivers can indicate a device is managed by a given IOMMU using + * this interface. A link to the device will be created in the "devices" + * directory of the IOMMU device in sysfs and an "iommu" link will be + * created under the linked device, pointing back at the IOMMU device. 
+ */ +int iommu_device_link(struct device *dev, struct device *link) +{ + int ret; + + if (!dev || IS_ERR(dev)) + return -ENODEV; + + ret = sysfs_add_link_to_group(&dev->kobj, "devices", + &link->kobj, dev_name(link)); + if (ret) + return ret; + + ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu"); + if (ret) + sysfs_remove_link_from_group(&dev->kobj, "devices", + dev_name(link)); + + return ret; +} + +void iommu_device_unlink(struct device *dev, struct device *link) +{ + if (!dev || IS_ERR(dev)) + return; + + sysfs_remove_link(&link->kobj, "iommu"); + sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link)); +} diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index e5555fcfe703..169836020208 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -29,12 +29,17 @@ #include <linux/idr.h> #include <linux/notifier.h> #include <linux/err.h> +#include <linux/pci.h> #include <trace/events/iommu.h> static struct kset *iommu_group_kset; static struct ida iommu_group_ida; static struct mutex iommu_group_mutex; +struct iommu_callback_data { + const struct iommu_ops *ops; +}; + struct iommu_group { struct kobject kobj; struct kobject *devices_kobj; @@ -514,9 +519,191 @@ int iommu_group_id(struct iommu_group *group) } EXPORT_SYMBOL_GPL(iommu_group_id); +/* + * To consider a PCI device isolated, we require ACS to support Source + * Validation, Request Redirection, Completer Redirection, and Upstream + * Forwarding. This effectively means that devices cannot spoof their + * requester ID, requests and completions cannot be redirected, and all + * transactions are forwarded upstream, even as it passes through a + * bridge where the target device is downstream. + */ +#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) + +struct group_for_pci_data { + struct pci_dev *pdev; + struct iommu_group *group; +}; + +/* + * DMA alias iterator callback, return the last seen device. Stop and return + * the IOMMU group if we find one along the way. + */ +static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct group_for_pci_data *data = opaque; + + data->pdev = pdev; + data->group = iommu_group_get(&pdev->dev); + + return data->group != NULL; +} + +/* + * Use standard PCI bus topology, isolation features, and DMA alias quirks + * to find or create an IOMMU group for a device. + */ +static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) +{ + struct group_for_pci_data data; + struct pci_bus *bus; + struct iommu_group *group = NULL; + struct pci_dev *tmp; + + /* + * Find the upstream DMA alias for the device. A device must not + * be aliased due to topology in order to have its own IOMMU group. + * If we find an alias along the way that already belongs to a + * group, use it. + */ + if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) + return data.group; + + pdev = data.pdev; + + /* + * Continue upstream from the point of minimum IOMMU granularity + * due to aliases to the point where devices are protected from + * peer-to-peer DMA by PCI ACS. Again, if we find an existing + * group, use it. + */ + for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { + if (!bus->self) + continue; + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) + break; + + pdev = bus->self; + + group = iommu_group_get(&pdev->dev); + if (group) + return group; + } + + /* + * Next we need to consider DMA alias quirks. If one device aliases + * to another, they should be grouped together. 
It's theoretically + * possible that aliases could create chains of devices where each + * device aliases another device. If we then factor in multifunction + * ACS grouping requirements, each alias could incorporate a new slot + * with multiple functions, each with aliases. This is all extremely + * unlikely as DMA alias quirks are typically only used for PCIe + * devices where we usually have a single slot per bus. Furthermore, + * the alias quirk is usually to another function within the slot + * (and ACS multifunction is not supported) or to a different slot + * that doesn't physically exist. The likely scenario is therefore + * that everything on the bus gets grouped together. To reduce the + * problem space, share the IOMMU group for all devices on the bus + * if a DMA alias quirk is present on the bus. + */ + tmp = NULL; + for_each_pci_dev(tmp) { + if (tmp->bus != pdev->bus || + !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) + continue; + + pci_dev_put(tmp); + tmp = NULL; + + /* We have an alias quirk, search for an existing group */ + for_each_pci_dev(tmp) { + struct iommu_group *group_tmp; + + if (tmp->bus != pdev->bus) + continue; + + group_tmp = iommu_group_get(&tmp->dev); + if (!group) { + group = group_tmp; + continue; + } + + if (group_tmp) { + WARN_ON(group != group_tmp); + iommu_group_put(group_tmp); + } + } + + return group ? group : iommu_group_alloc(); + } + + /* + * Non-multifunction devices or multifunction devices supporting + * ACS get their own group. + */ + if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) + return iommu_group_alloc(); + + /* + * Multifunction devices not supporting ACS share a group with other + * similar devices in the same slot. + */ + tmp = NULL; + for_each_pci_dev(tmp) { + if (tmp == pdev || tmp->bus != pdev->bus || + PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || + pci_acs_enabled(tmp, REQ_ACS_FLAGS)) + continue; + + group = iommu_group_get(&tmp->dev); + if (group) { + pci_dev_put(tmp); + return group; + } + } + + /* No shared group found, allocate new */ + return iommu_group_alloc(); +} + +/** + * iommu_group_get_for_dev - Find or create the IOMMU group for a device + * @dev: target device + * + * This function is intended to be called by IOMMU drivers and extended to + * support common, bus-defined algorithms when determining or creating the + * IOMMU group for a device. On success, the caller will hold a reference + * to the returned IOMMU group, which will already include the provided + * device. The reference should be released with iommu_group_put(). 
+ */ +struct iommu_group *iommu_group_get_for_dev(struct device *dev) +{ + struct iommu_group *group = ERR_PTR(-EIO); + int ret; + + group = iommu_group_get(dev); + if (group) + return group; + + if (dev_is_pci(dev)) + group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); + + if (IS_ERR(group)) + return group; + + ret = iommu_group_add_device(group, dev); + if (ret) { + iommu_group_put(group); + return ERR_PTR(ret); + } + + return group; +} + static int add_iommu_group(struct device *dev, void *data) { - struct iommu_ops *ops = data; + struct iommu_callback_data *cb = data; + const struct iommu_ops *ops = cb->ops; if (!ops->add_device) return -ENODEV; @@ -532,7 +719,7 @@ static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_group *group; unsigned long group_action = 0; @@ -585,10 +772,14 @@ static struct notifier_block iommu_bus_nb = { .notifier_call = iommu_bus_notifier, }; -static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) +static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) { + struct iommu_callback_data cb = { + .ops = ops, + }; + bus_register_notifier(bus, &iommu_bus_nb); - bus_for_each_dev(bus, NULL, ops, add_iommu_group); + bus_for_each_dev(bus, NULL, &cb, add_iommu_group); } /** @@ -604,7 +795,7 @@ static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) * is set up. With this function the iommu-driver can set the iommu-ops * afterwards. */ -int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops) +int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) { if (bus->iommu_ops != NULL) return -EBUSY; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 53cde086e83b..7dab5cbcc775 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1120,7 +1120,7 @@ static void ipmmu_remove_device(struct device *dev) dev->archdata.iommu = NULL; } -static struct iommu_ops ipmmu_ops = { +static const struct iommu_ops ipmmu_ops = { .domain_init = ipmmu_domain_init, .domain_destroy = ipmmu_domain_destroy, .attach_dev = ipmmu_attach_device, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f5ff657f49fa..49f41d6e02f1 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -674,7 +674,7 @@ fail: return 0; } -static struct iommu_ops msm_iommu_ops = { +static const struct iommu_ops msm_iommu_ops = { .domain_init = msm_iommu_domain_init, .domain_destroy = msm_iommu_domain_destroy, .attach_dev = msm_iommu_attach_dev, diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 61599e2b33ca..e202b0c24120 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1278,7 +1278,7 @@ static void omap_iommu_remove_device(struct device *dev) kfree(arch_data); } -static struct iommu_ops omap_iommu_ops = { +static const struct iommu_ops omap_iommu_ops = { .domain_init = omap_iommu_domain_init, .domain_destroy = omap_iommu_domain_destroy, .attach_dev = omap_iommu_attach_dev, diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h deleted file mode 100644 index 352d80ae7443..000000000000 --- a/drivers/iommu/pci.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Copyright (C) 2013 Red Hat, Inc. - * Copyright (C) 2013 Freescale Semiconductor, Inc. - * - */ -#ifndef __IOMMU_PCI_H -#define __IOMMU_PCI_H - -/* Helper function for swapping pci device reference */ -static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) -{ - pci_dev_put(*from); - *from = to; -} - -#endif /* __IOMMU_PCI_H */ diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c index 464acda0bbc4..1333e6fb3405 100644 --- a/drivers/iommu/shmobile-iommu.c +++ b/drivers/iommu/shmobile-iommu.c @@ -354,7 +354,7 @@ static int shmobile_iommu_add_device(struct device *dev) return 0; } -static struct iommu_ops shmobile_iommu_ops = { +static const struct iommu_ops shmobile_iommu_ops = { .domain_init = shmobile_iommu_domain_init, .domain_destroy = shmobile_iommu_domain_destroy, .attach_dev = shmobile_iommu_attach_device, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index dba1a9fd5070..b10a8ecede8e 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -309,7 +309,7 @@ static int gart_iommu_domain_has_cap(struct iommu_domain *domain, return 0; } -static struct iommu_ops gart_iommu_ops = { +static const struct iommu_ops gart_iommu_ops = { .domain_init = gart_iommu_domain_init, .domain_destroy = gart_iommu_domain_destroy, .attach_dev = gart_iommu_attach_dev, diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 605b5b46a903..792da5ea6d12 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -947,7 +947,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain) dev_dbg(smmu->dev, "smmu_as@%p\n", as); } -static struct iommu_ops smmu_iommu_ops = { +static const struct iommu_ops smmu_iommu_ops = { .domain_init = smmu_iommu_domain_init, .domain_destroy = smmu_iommu_domain_destroy, .attach_dev = smmu_iommu_attach_dev, diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index c887e6eebc41..574aba0eba4e 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask, static void armada_xp_mpic_smp_cpu_init(void) { + u32 control; + int nr_irqs, i; + + control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); + /* Clear pending IPIs */ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); @@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, struct device_node *parent) { struct resource main_int_res, per_cpu_int_res; - int parent_irq; + int parent_irq, nr_irqs, i; u32 control; BUG_ON(of_address_to_resource(node, 0, &main_int_res)); @@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, BUG_ON(!per_cpu_int_base); control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + 
writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); armada_370_xp_mpic_domain = - irq_domain_add_linear(node, (control >> 2) & 0x3ff, + irq_domain_add_linear(node, nr_irqs, &armada_370_xp_mpic_irq_ops, NULL); BUG_ON(!armada_370_xp_mpic_domain); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 8ee2a36d5840..c15c840987d2 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, /* Allocate a single Generic IRQ chip for this node */ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, - np->full_name, handle_level_irq, clr, 0, 0); + np->full_name, handle_edge_irq, clr, 0, 0); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 7e11c9d6ae8c..7c131cf7cc13 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -42,6 +42,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqchip/arm-gic.h> +#include <asm/cputype.h> #include <asm/irq.h> #include <asm/exception.h> #include <asm/smp_plat.h> @@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, } for_each_possible_cpu(cpu) { - unsigned long offset = percpu_offset * cpu_logical_map(cpu); + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + unsigned long offset = percpu_offset * core_id; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; } @@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) gic_cnt++; return 0; } +IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init); IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 3fdda3a40269..6ce6bd3441bf 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = { }; static struct spear_shirq spear320_shirq_ras3 = { - .irq_nr = 3, + .irq_nr = 7, .irq_bit_off = 0, .invalid_irq = 1, .regs = { diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c index 0df6691d045c..8dc791bfaa6f 100644 --- a/drivers/isdn/hisax/l3ni1.c +++ b/drivers/isdn/hisax/l3ni1.c @@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic) memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ - if (ic->parm.ni1_io.timeout > 0) - if (!(pc = ni1_new_l3_process(st, -1))) - { free_invoke_id(st, id); + if (ic->parm.ni1_io.timeout > 0) { + pc = ni1_new_l3_process(st, -1); + if (!pc) { + free_invoke_id(st, id); return (-2); } - pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ - pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ + /* remember id */ + pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; + /* and procedure */ + pc->prot.ni1.proc = ic->parm.ni1_io.proc; + } if (!(skb = l3_alloc_skb(l))) { free_invoke_id(st, id); diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 
61ac63237446..62f0688d45a5 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; - int len, err; + int len; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; @@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p) if (IS_ERR(code)) return PTR_ERR(code); - err = sk_chk_filter(code, uprog.len); - if (err) { - kfree(code); - return err; - } - *p = code; return uprog.len; } @@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) fprog.len = len; fprog.filter = code; - if (is->pass_filter) + if (is->pass_filter) { sk_unattached_filter_destroy(is->pass_filter); - err = sk_unattached_filter_create(&is->pass_filter, &fprog); + is->pass_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&is->pass_filter, + &fprog); + else + err = 0; kfree(code); return err; @@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) fprog.len = len; fprog.filter = code; - if (is->active_filter) + if (is->active_filter) { sk_unattached_filter_destroy(is->active_filter); - err = sk_unattached_filter_create(&is->active_filter, &fprog); + is->active_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&is->active_filter, + &fprog); + else + err = 0; kfree(code); return err; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 4ead4ba60656..d2899e7eb3aa 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd) disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk_super->data_block_size), + (unsigned long long)cmd->data_block_size); + r = -EINVAL; + goto bad; + } + r = __check_incompat_features(disk_super, cmd); if (r < 0) goto bad; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 53b213226c01..4cba2d808afb 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. 
* Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> @@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void) module_init(dm_crypt_init); module_exit(dm_crypt_exit); -MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3842ac738f98..db404a0f7e2c 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -10,6 +10,7 @@ #include <linux/device-mapper.h> #include <linux/bio.h> +#include <linux/completion.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/sched.h> @@ -32,7 +33,7 @@ struct dm_io_client { struct io { unsigned long error_bits; atomic_t count; - struct task_struct *sleeper; + struct completion *wait; struct dm_io_client *client; io_notify_fn callback; void *context; @@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error) invalidate_kernel_vmap_range(io->vma_invalidate_address, io->vma_invalidate_size); - if (io->sleeper) - wake_up_process(io->sleeper); + if (io->wait) + complete(io->wait); else { unsigned long r = io->error_bits; @@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, */ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); + DECLARE_COMPLETION_ONSTACK(wait); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = current; + io->wait = &wait; io->client = client; io->vma_invalidate_address = dp->vma_invalidate_address; @@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, dispatch_io(rw, num_regions, where, dp, io, 1); - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - - if (!atomic_read(&io->count)) - break; - - io_schedule(); - } - set_current_state(TASK_RUNNING); + wait_for_completion_io(&wait); if (error_bits) *error_bits = io->error_bits; @@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = NULL; + io->wait = NULL; io->client = client; io->callback = fn; io->context = context; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 3f6fd9d33ba3..f4167b013d99 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti) spin_lock_irqsave(&m->lock, flags); - /* pg_init in progress, requeue until done */ - if (!pg_ready(m)) { + /* pg_init in progress or no paths available */ + if (m->pg_init_in_progress || + (!m->nr_valid_paths && m->queue_if_no_path)) { busy = 1; goto out; } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index b086a945edcb..e9d33ad59df5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + 
le32_to_cpu(disk_super->data_block_size), + (unsigned long long)pmd->data_block_size); + r = -EINVAL; + goto bad_unlock_sblock; + } + r = __check_incompat_features(disk_super, pmd); if (r < 0) goto bad_unlock_sblock; diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index c99003e0d47a..b9a64bbce304 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2003 Jana Saout <jana@saout.de> * * This file is released under the GPL. */ @@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void) module_init(dm_zero_init) module_exit(dm_zero_exit) -MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 437d99045ef2..32b958dbc499 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w); static DECLARE_WORK(deferred_remove_work, do_deferred_remove); +static struct workqueue_struct *deferred_remove_workqueue; + /* * For bio-based dm. * One of these is allocated per bio. @@ -276,16 +278,24 @@ static int __init local_init(void) if (r) goto out_free_rq_tio_cache; + deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); + if (!deferred_remove_workqueue) { + r = -ENOMEM; + goto out_uevent_exit; + } + _major = major; r = register_blkdev(_major, _name); if (r < 0) - goto out_uevent_exit; + goto out_free_workqueue; if (!_major) _major = r; return 0; +out_free_workqueue: + destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); out_free_rq_tio_cache: @@ -299,6 +309,7 @@ out_free_io_cache: static void local_exit(void) { flush_scheduled_work(); + destroy_workqueue(deferred_remove_workqueue); kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_io_cache); @@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode) if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) - schedule_work(&deferred_remove_work); + queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); diff --git a/drivers/md/md.c b/drivers/md/md.c index 34846856dbc6..32fc19c540d4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5599,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg) if (mddev->in_sync) info.state = (1<<MD_SB_CLEAN); if (mddev->bitmap && mddev->bitmap_info.offset) - info.state = (1<<MD_SB_BITMAP_PRESENT); + info.state |= (1<<MD_SB_BITMAP_PRESENT); info.active_disks = insync; info.working_disks = working; info.failed_disks = failed; @@ -7501,6 +7501,19 @@ void md_do_sync(struct md_thread *thread) rdev->recovery_offset < j) j = rdev->recovery_offset; rcu_read_unlock(); + + /* If there is a bitmap, we need to make sure all + * writes that started before we added a spare + * complete before we start doing a recovery. + * Otherwise the write might complete and (via + * bitmap_endwrite) set a bit in the bitmap after the + * recovery has checked that bit and skipped that + * region. 
+ */ + if (mddev->bitmap) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } } printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 8637d2ed7623..2e3cdcfa0a67 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd) jiffies_to_msecs(jiffies) - (jiffies_to_msecs(timeout) - TIMEOUT)); - if (!(cmd->args[0] >> 7) & 0x01) { + if (!((cmd->args[0] >> 7) & 0x01)) { ret = -ETIMEDOUT; goto err_mutex_unlock; } @@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; - cmd.args[0] = 0x05; - cmd.args[1] = 0x00; - cmd.args[2] = 0xaa; - cmd.args[3] = 0x4d; - cmd.args[4] = 0x56; - cmd.args[5] = 0x40; - cmd.args[6] = 0x00; - cmd.args[7] = 0x00; - cmd.wlen = 8; - cmd.rlen = 1; - ret = si2168_cmd_execute(s, &cmd); - if (ret) - goto err; - /* cold state - try to download firmware */ dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", KBUILD_MODNAME, si2168_ops.info.name); diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h index 2a343e896f40..53f7f06ae343 100644 --- a/drivers/media/dvb-frontends/si2168_priv.h +++ b/drivers/media/dvb-frontends/si2168_priv.h @@ -22,7 +22,7 @@ #include <linux/firmware.h> #include <linux/i2c-mux.h> -#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" +#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw" /* state struct */ struct si2168 { diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 522fe00f5eee..9619be5d4827 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u8 mode, rolloff, pilot, inversion, div; + fe_modulation_t modulation; dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", @@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) switch (c->delivery_system) { case SYS_DVBS: + modulation = QPSK; rolloff = 0; pilot = 2; break; case SYS_DVBS2: + modulation = c->modulation; + switch (c->rolloff) { case ROLLOFF_20: rolloff = 2; @@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && - c->modulation == TDA10071_MODCOD[i].modulation && + modulation == TDA10071_MODCOD[i].modulation && c->fec_inner == TDA10071_MODCOD[i].fec) { mode = TDA10071_MODCOD[i].val; dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", @@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe) switch ((buf[1] >> 0) & 0x01) { case 0: - c->inversion = INVERSION_OFF; + c->inversion = INVERSION_ON; break; case 1: - c->inversion = INVERSION_ON; + c->inversion = INVERSION_OFF; break; } @@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe) if (ret) goto error; - c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); + c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000; return ret; error: diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h index 
4baf14bfb65a..420486192736 100644 --- a/drivers/media/dvb-frontends/tda10071_priv.h +++ b/drivers/media/dvb-frontends/tda10071_priv.h @@ -55,6 +55,7 @@ static struct tda10071_modcod { { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, /* 8PSK */ + { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 }, { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index e65c760e4e8b..0006d6bf8c18 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops = .read = vb2_fop_read, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops ts_ioctl_ops = { diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index a7ed16497903..1e4ec697fb10 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -269,6 +269,7 @@ err: list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } + spin_unlock_irqrestore(&common->irqlock, flags); return ret; } diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 5bb085b19bcb..b431b58f39e3 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -233,6 +233,7 @@ err: list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } + spin_unlock_irqrestore(&common->irqlock, flags); return ret; } diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 271a752cee54..fa4cc7b880aa 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd) jiffies_to_msecs(jiffies) - (jiffies_to_msecs(timeout) - TIMEOUT)); - if (!(buf[0] >> 7) & 0x01) { + if (!((buf[0] >> 7) & 0x01)) { ret = -ETIMEDOUT; goto err_mutex_unlock; } else { diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 021e4d35e4d7..7b9b75f60774 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d) if (ret < 0) goto err; - if (tmp == 0x00) - dev_dbg(&d->udev->dev, - "%s: [%d]tuner not set, using default\n", - __func__, i); - else + dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", + __func__, i, tmp); + + /* tuner sanity check */ + if (state->chip_type == 0x9135) { + if (state->chip_version == 0x02) { + /* IT9135 BX (v2) */ + switch (tmp) { + case AF9033_TUNER_IT9135_60: + case AF9033_TUNER_IT9135_61: + case AF9033_TUNER_IT9135_62: + state->af9033_config[i].tuner = tmp; + break; + } + } else { + /* IT9135 AX (v1) */ + switch (tmp) { + case AF9033_TUNER_IT9135_38: + case AF9033_TUNER_IT9135_51: + case AF9033_TUNER_IT9135_52: + state->af9033_config[i].tuner = tmp; + break; + } + } + } else { + /* AF9035 */ state->af9033_config[i].tuner = tmp; + } - dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", - __func__, i, state->af9033_config[i].tuner); + if (state->af9033_config[i].tuner != tmp) { + dev_info(&d->udev->dev, + "%s: [%d] overriding tuner from %02x to %02x\n", + KBUILD_MODNAME, i, tmp, + state->af9033_config[i].tuner); + } switch 
(state->af9033_config[i].tuner) { case AF9033_TUNER_TUA9001: diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c index 2fd1c5e31a0f..339adce7c7a5 100644 --- a/drivers/media/usb/gspca/pac7302.c +++ b/drivers/media/usb/gspca/pac7302.c @@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x2620)}, {USB_DEVICE(0x093a, 0x2621)}, {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2625)}, {USB_DEVICE(0x093a, 0x2626)}, diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 0500c4175d5f..6bce01a674f9 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb) } /*=========================================================================*/ -/* bufffer bits */ +/* buffer bits */ /* function expects dev->io_mutex to be hold by caller */ int hdpvr_cancel_queue(struct hdpvr_device *dev) @@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_AUDIO_ENCODING: if (dev->flags & HDPVR_FLAG_AC3_CAP) { opt->audio_codec = ctrl->val; - return hdpvr_set_audio(dev, opt->audio_input, + return hdpvr_set_audio(dev, opt->audio_input + 1, opt->audio_codec); } return 0; @@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_AUDIO_ENCODING, ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, - 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); + 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC); v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index 4ae54caadd03..ce1c9f5d9dee 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait) aspect.denominator = 9; } else if (ratio == 34) { aspect.numerator = 4; - aspect.numerator = 3; + aspect.denominator = 3; } else if (ratio == 68) { aspect.numerator = 15; - aspect.numerator = 9; + aspect.denominator = 9; } else { aspect.numerator = hor_landscape + 99; aspect.denominator = 100; diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index e4ec355704a6..a7543ba3e190 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -52,6 +52,11 @@ /* Atmel chips */ #define AT49BV640D 0x02de #define AT49BV640DT 0x02db +/* Sharp chips */ +#define LH28F640BFHE_PTTL90 0x00b0 +#define LH28F640BFHE_PBTL90 0x00b1 +#define LH28F640BFHE_PTTL70A 0x00b2 +#define LH28F640BFHE_PBTL70A 0x00b3 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd) (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; }; +static int is_LH28F640BF(struct cfi_private *cfi) +{ + /* Sharp LH28F640BF Family */ + if (cfi->mfr == CFI_MFR_SHARP && ( + cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 || + cfi->id == LH28F640BFHE_PTTL70A || 
cfi->id == LH28F640BFHE_PBTL70A)) + return 1; + return 0; +} + +static void fixup_LH28F640BF(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; + + /* Reset the Partition Configuration Register on LH28F640BF + * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */ + if (is_LH28F640BF(cfi)) { + printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n"); + map_write(map, CMD(0x60), 0); + map_write(map, CMD(0x04), 0); + + /* We have set one single partition thus + * Simultaneous Operations are not allowed */ + printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n"); + extp->FeatureSupport &= ~512; + } +} + static void fixup_use_point(struct mtd_info *mtd) { struct map_info *map = mtd->priv; @@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = { { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF }, { 0, 0, NULL } }; @@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, initial_adr = adr; cmd_adr = adr & ~(wbufsize-1); + /* Sharp LH28F640BF chips need the first address for the + * Page Buffer Program command. See Table 5 of + * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */ + if (is_LH28F640BF(cfi)) + cmd_adr = adr; + /* Let's determine this according to the interleave only once */ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c index 7df86948e6d4..b4f61c7fc161 100644 --- a/drivers/mtd/devices/elm.c +++ b/drivers/mtd/devices/elm.c @@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info) ELM_SYNDROME_FRAGMENT_1 + offset); regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset); + break; default: return -EINVAL; } @@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info) regs->elm_syndrome_fragment_1[i]); elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, regs->elm_syndrome_fragment_0[i]); + break; default: return -EINVAL; } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 41167e9e991e..4f3e80c68a26 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd) ecc->layout->oobavail += ecc->layout->oobfree[i].length; mtd->oobavail = ecc->layout->oobavail; - /* ECC sanity check: warn noisily if it's too weak */ - WARN_ON(!nand_ecc_strength_good(mtd)); + /* ECC sanity check: warn if it's too weak */ + if (!nand_ecc_strength_good(mtd)) + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", + mtd->name); /* * Set the number of read / write steps for one page depending on ECC diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index b04e7d059888..0431b46d9fd9 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); - if (vol_id < av->vol_id) + if (vol_id > av->vol_id) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -423,7 
+423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, pnum, err); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; - } else if (ret == UBI_IO_BITFLIPS) + } else if (err == UBI_IO_BITFLIPS) scrub = 1; /* diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3a451b6cd3d5..701f86cd5993 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params) } if (ad_select) { - bond_opt_initstr(&newval, lacp_rate); + bond_opt_initstr(&newval, ad_select); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), &newval); if (!valptr) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 141160ef249a..5776e503e4c5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) work_done = bcm_sysport_tx_reclaim(ring->priv, ring); - if (work_done < budget) { + if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ intrl2_1_mask_clear(ring->priv, BIT(ring->index)); } - return work_done; + return 0; } static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) @@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, usleep_range(1000, 2000); } -static inline int umac_reset(struct bcm_sysport_priv *priv) +static inline void umac_reset(struct bcm_sysport_priv *priv) { - unsigned int timeout = 0; u32 reg; - int ret = 0; - - umac_writel(priv, 0, UMAC_CMD); - while (timeout++ < 1000) { - reg = umac_readl(priv, UMAC_CMD); - if (!(reg & CMD_SW_RESET)) - break; - - udelay(1); - } - - if (timeout == 1000) { - dev_err(&priv->pdev->dev, - "timeout waiting for MAC to come out of reset\n"); - ret = -ETIMEDOUT; - } - return ret; + reg = umac_readl(priv, UMAC_CMD); + reg |= CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); + udelay(10); + reg = umac_readl(priv, UMAC_CMD); + reg &= ~CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); } static void umac_set_hw_addr(struct bcm_sysport_priv *priv, @@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev) int ret; /* Reset UniMAC */ - ret = umac_reset(priv); - if (ret) { - netdev_err(dev, "UniMAC reset failed\n"); - return ret; - } + umac_reset(priv); /* Flush TX and RX FIFOs at TOPCTRL level */ topctrl_flush(priv); @@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev) BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); dev->needed_headroom += sizeof(struct bcm_tsb); - /* We are interfaced to a switch which handles the multicast - * filtering for us, so we do not support programming any - * multicast hash table in this Ethernet MAC. 
- */ - dev->flags &= ~IFF_MULTICAST; - /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 47c5814114e1..4b875da1c7ed 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, return; } - bnx2x_frag_free(fp, new_data); + if (new_data) + bnx2x_frag_free(fp, new_data); drop: /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2887034523e0..6a8b1453a1b9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) * without the default SB. * For VFs there is no default SB, then we return (index+1). */ - pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); index = control & PCI_MSIX_FLAGS_QSIZE; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 5ba1cfbd60da..16281ad2da12 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) if (cb->skb) continue; - /* set the DMA descriptor length once and for all - * it will only change if we support dynamically sizing - * priv->rx_buf_len, but we do not - */ - dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr, - priv->rx_buf_len << DMA_BUFLENGTH_SHIFT); - ret = bcmgenet_rx_refill(priv, cb); if (ret) break; @@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev) netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); - err = register_netdev(dev); - if (err) - goto err_clk_disable; + /* libphy will determine the link state */ + netif_carrier_off(dev); /* Turn off the main clock, WOL clock is handled separately */ if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); + err = register_netdev(dev); + if (err) + goto err; + return err; err_clk_disable: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0f117105fed1..e23c993b1362 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -331,9 +331,9 @@ struct bcmgenet_mib_counters { #define EXT_ENERGY_DET_MASK (1 << 12) #define EXT_RGMII_OOB_CTRL 0x0C -#define RGMII_MODE_EN (1 << 0) #define RGMII_LINK (1 << 4) #define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) #define ID_MODE_DIS (1 << 16) #define EXT_GPHY_CTRL 0x1C diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 34a26e42f19d..1e187fb760f8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); be_enable_busy_poll(eqo); - be_eq_notify(adapter, eqo->q.id, true, false, 0); + be_eq_notify(adapter, eqo->q.id, 
true, true, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index fab39e295441..36fc429298e3 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (ug_info->rxExtendedFiltering) { size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a2db388cc31e..ee74f9536b31 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) s32 ret_val; u16 i, rar_count = mac->rar_entry_count; + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + /* Initialize identification LED */ ret_val = igb_id_led_init(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 2a8bb35c2df2..217f8138851b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -46,14 +46,15 @@ #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ /* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 /* Interrupt delay cancellation */ /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 @@ -62,6 +63,7 @@ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 #define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_OPCODE_READ 0x08000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 89925e405849..ce55ea5d750c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw); /* These functions must be implemented by drivers */ s32 
igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); #endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 337161f440dd..65d931669f81 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw) } return ret_val; } + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 9f34976687ba..3442b6357d01 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE { #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define 
E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 1cc4b1a7e597..f5ba4e4eafb9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -66,6 +66,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f145adbb55ac..a9537ba7a5a0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; @@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev) if (netif_running(netdev)) igb_close(netdev); + else + igb_reset(adapter); igb_clear_interrupt_scheme(adapter); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 45beca17fa50..dadd9a5f6323 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; - if (l3_proto == swab16(ETH_P_IP)) + if (l3_proto == htons(ETH_P_IP)) command |= MVNETA_TXD_IP_CSUM; else command |= MVNETA_TX_L3_IP6; @@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev) if (phydev->speed == SPEED_1000) val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else + else if (phydev->speed == SPEED_100) val |= MVNETA_GMAC_CONFIG_MII_SPEED; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 80f725228f5b..56022d647837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, init_completion(&cq->free); cq->irq = priv->eq_table.eq[cq->vector].irq; - cq->irq_affinity_change = false; - return 0; err_radix: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 4b2130760eed..82322b1c8411 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", name); } + } } else { cq->vector = (cq->ring + 1 + 
priv->port) % mdev->dev->caps.num_comp_vectors; } + + cq->irq_desc = + irq_to_desc(mlx4_eq_get_irq(mdev->dev, + cq->vector)); } else { /* For TX we use the same irq per ring we assigned for the RX */ @@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (priv->mdev->dev->caps.comp_pool && cq->vector) { - if (!cq->is_tx) - irq_set_affinity_hint(cq->mcq.irq, NULL); mlx4_release_eq(priv->mdev->dev, cq->vector); } cq->vector = 0; @@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (!cq->is_tx) { napi_hash_del(&cq->napi); synchronize_rcu(); + irq_set_affinity_hint(cq->mcq.irq, NULL); } netif_napi_del(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index fa1a069e14e6..68d763d2d030 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev, coal->tx_coalesce_usecs = priv->tx_usecs; coal->tx_max_coalesced_frames = priv->tx_frames; + coal->tx_max_coalesced_frames_irq = priv->tx_work_limit; + coal->rx_coalesce_usecs = priv->rx_usecs; coal->rx_max_coalesced_frames = priv->rx_frames; @@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev, coal->rx_coalesce_usecs_high = priv->rx_usecs_high; coal->rate_sample_interval = priv->sample_interval; coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; + return 0; } @@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); + if (!coal->tx_max_coalesced_frames_irq) + return -EINVAL; + priv->rx_frames = (coal->rx_max_coalesced_frames == MLX4_EN_AUTO_CONF) ? 
MLX4_EN_RX_COAL_TARGET : @@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev, priv->rx_usecs_high = coal->rx_coalesce_usecs_high; priv->sample_interval = coal->rate_sample_interval; priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + priv->tx_work_limit = coal->tx_max_coalesced_frames_irq; return mlx4_en_moderation_update(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7d4fb7bf2593..7345c43b019e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); __be16 current_port; - if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; if (sa_family == AF_INET6) @@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, MLX4_WQE_CTRL_SOLICITED); priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; + priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index d2d415732d99..5535862f27cc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -40,6 +40,7 @@ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> +#include <linux/irq.h> #include "mlx4_en.h" @@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) /* If we used up all the quota - we're probably not done yet... */ if (done == budget) { + int cpu_curr; + const struct cpumask *aff; + INC_PERF_COUNTER(priv->pstats.napi_quota); - if (unlikely(cq->mcq.irq_affinity_change)) { - cq->mcq.irq_affinity_change = false; + + cpu_curr = smp_processor_id(); + aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + + if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) { + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. 
need to stop this NAPI + * poll, and restart it on the right CPU + */ napi_complete(napi); mlx4_en_arm_cq(priv, cq); return 0; } } else { /* Done for now */ - cq->mcq.irq_affinity_change = false; napi_complete(napi); mlx4_en_arm_cq(priv, cq); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8be7483f8236..5045bab59633 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } -static int mlx4_en_process_tx_cq(struct net_device *dev, - struct mlx4_en_cq *cq, - int budget) +static bool mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; @@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev, int factor = priv->cqe_factor; u64 timestamp = 0; int done = 0; + int budget = priv->tx_work_limit; if (!priv->port_up) - return 0; + return true; index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; @@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev, netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; } - return done; + return done < budget; } void mlx4_en_tx_irq(struct mlx4_cq *mcq) @@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); - int done; + int clean_complete; - done = mlx4_en_process_tx_cq(dev, cq, budget); + clean_complete = mlx4_en_process_tx_cq(dev, cq); + if (!clean_complete) + return budget; - /* If we used up all the quota - we're probably not done yet... 
*/ - if (done < budget) { - /* Done for now */ - cq->mcq.irq_affinity_change = false; - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return done; - } else if (unlikely(cq->mcq.irq_affinity_change)) { - cq->mcq.irq_affinity_change = false; - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return 0; - } - return budget; + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + + return 0; } static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index d954ec1eac17..2a004b347e1d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -53,11 +53,6 @@ enum { MLX4_EQ_ENTRY_SIZE = 0x20 }; -struct mlx4_irq_notify { - void *arg; - struct irq_affinity_notify notify; -}; - #define MLX4_EQ_STATUS_OK ( 0 << 28) #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) #define MLX4_EQ_OWNER_SW ( 0 << 24) @@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev) iounmap(priv->clr_base); } -static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct mlx4_irq_notify *n = container_of(notify, - struct mlx4_irq_notify, - notify); - struct mlx4_priv *priv = (struct mlx4_priv *)n->arg; - struct radix_tree_iter iter; - void **slot; - - radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) { - struct mlx4_cq *cq = (struct mlx4_cq *)(*slot); - - if (cq->irq == notify->irq) - cq->irq_affinity_change = true; - } -} - -static void mlx4_release_irq_notifier(struct kref *ref) -{ - struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify, - notify.kref); - kfree(n); -} - -static void mlx4_assign_irq_notifier(struct mlx4_priv *priv, - struct mlx4_dev *dev, int irq) -{ - struct mlx4_irq_notify *irq_notifier = NULL; - int err = 0; - - irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL); - if (!irq_notifier) { - mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n", - irq); - return; - } - - irq_notifier->notify.irq = irq; - irq_notifier->notify.notify = mlx4_irq_notifier_notify; - irq_notifier->notify.release = mlx4_release_irq_notifier; - irq_notifier->arg = priv; - err = irq_set_affinity_notifier(irq, &irq_notifier->notify); - if (err) { - kfree(irq_notifier); - irq_notifier = NULL; - mlx4_warn(dev, "Failed to set irq notifier. 
irq %d\n", irq); - } -} - - int mlx4_alloc_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, continue; /*we dont want to break here*/ } - mlx4_assign_irq_notifier(priv, dev, - priv->eq_table.eq[vec].irq); eq_set_ci(&priv->eq_table.eq[vec], 1); } @@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, } EXPORT_SYMBOL(mlx4_assign_eq); +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->eq_table.eq[vec].irq; +} +EXPORT_SYMBOL(mlx4_eq_get_irq); + void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec) Belonging to a legacy EQ*/ mutex_lock(&priv->msix_ctl.pool_lock); if (priv->msix_ctl.pool_bm & 1ULL << i) { - irq_set_affinity_notifier( - priv->eq_table.eq[vec].irq, - NULL); free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]); priv->msix_ctl.pool_bm &= ~(1ULL << i); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 0e15295bedd6..d72a5a894fc6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -126,6 +126,8 @@ enum { #define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ MLX4_EN_NUM_UP) +#define MLX4_EN_DEFAULT_TX_WORK 256 + /* Target number of packets to coalesce with interrupt moderation */ #define MLX4_EN_RX_COAL_TARGET 44 #define MLX4_EN_RX_COAL_TIME 0x10 @@ -343,6 +345,7 @@ struct mlx4_en_cq { #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) spinlock_t poll_lock; /* protects from LLS/napi conflicts */ #endif /* CONFIG_NET_RX_BUSY_POLL */ + struct irq_desc *irq_desc; }; struct mlx4_en_port_profile { @@ -542,6 +545,7 @@ struct mlx4_en_priv { __be32 ctrl_flags; u32 flags; u8 num_tx_rings_p_up; + u32 tx_work_limit; u32 tx_ring_num; u32 rx_ring_num; u32 rx_skb_size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index ba0401d4af50..184c3615f479 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, write_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); write_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n", + mlx5_base_mkey(mr->key), err); + mlx5_core_destroy_mkey(dev, mr); + } return err; } @@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) struct mlx5_mr_table *table = &dev->priv.mr_table; struct mlx5_destroy_mkey_mbox_in in; struct mlx5_destroy_mkey_mbox_out out; + struct mlx5_core_mr *deleted_mr; unsigned long flags; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); + write_lock_irqsave(&table->lock, flags); + deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + write_unlock_irqrestore(&table->lock, flags); + if (!deleted_mr) { + mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", + mlx5_base_mkey(mr->key)); + return -ENOENT; + } + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); @@ -119,10 
+134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); - write_lock_irqsave(&table->lock, flags); - radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); - write_unlock_irqrestore(&table->lock, flags); - return err; } EXPORT_SYMBOL(mlx5_core_destroy_mkey); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index be425ad5e824..61623e9af574 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -538,6 +538,7 @@ enum rtl_register_content { MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ /* Config4 register */ @@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: @@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev) PCI_EXP_LNKCTL_CLKREQ_EN); } +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 data; + + data = RTL_R8(Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(Config3, data); +} + #define R8168_CPCMD_QUIRK_MASK (\ EnableBist | \ Mac_dbgo_oe | \ @@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) }; rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); @@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) @@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8106(struct rtl8169_private *tp) @@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8101(struct net_device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index b3e148ef5683..9d3748361a1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw) static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) { - u32 value; - - value = readl(ioaddr + GMAC_AN_CTRL); /* auto negotiation enable and External Loopback enable */ - value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; if (restart) value |= GMAC_AN_CTRL_RAN; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7e6628a91514..1e2bcf5f89e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, x->rx_msg_type_delay_req++; else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) x->rx_msg_type_delay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) + else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ) x->rx_msg_type_pdelay_req++; else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) x->rx_msg_type_pdelay_resp++; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 1c24a8f368bd..fd411d6e19a2 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) return vp; } +static void vnet_cleanup(void) +{ + struct vnet *vp; + struct net_device *dev; + + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + dev = vp->dev; + /* vio_unregister_driver() should have cleaned up port_list */ + BUG_ON(!list_empty(&vp->port_list)); + unregister_netdev(dev); + free_netdev(dev); + } + mutex_unlock(&vnet_list_mutex); +} + static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, @@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev) kfree(port); - unregister_netdev(vp->dev); } return 0; } @@ -1268,6 +1285,7 @@ static int __init vnet_init(void) static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); + vnet_cleanup(); } module_init(vnet_init); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index eb78203cd58e..2aa57270838f 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type); static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); static void dfx_rcv_queue_process(DFX_board_t *bp); +#ifdef DYNAMIC_BUFFERS static void dfx_rcv_flush(DFX_board_t *bp); +#else +static inline void dfx_rcv_flush(DFX_board_t *bp) {} +#endif static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev); @@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) * Align an sk_buff to a boundary power of 2 * */ - +#ifdef DYNAMIC_BUFFERS static void my_skb_align(struct sk_buff *skb, int n) { unsigned long x = (unsigned long)skb->data; @@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n) skb_reserve(skb, v - x); } - +#endif /* * ================ @@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process( break; } else { -#ifndef DYNAMIC_BUFFERS - if (! 
rx_in_place) -#endif - { + if (!rx_in_place) { /* Receive buffer allocated, pass receive packet up */ skb_copy_to_linear_data(skb, @@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp ) } } -#else -static inline void dfx_rcv_flush( DFX_board_t *bp ) -{ -} #endif /* DYNAMIC_BUFFERS */ /* diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 6a999e6814a0..9408157a246c 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, { struct dp83640_private *dp83640 = phydev->priv; - if (!dp83640->hwts_rx_en) - return false; - if (is_status_frame(skb, type)) { decode_status_frame(dp83640, skb); kfree_skb(skb); return true; } + if (!dp83640->hwts_rx_en) + return false; + SKB_PTP_TYPE(skb) = type; skb_queue_tail(&dp83640->rx_queue, skb); schedule_work(&dp83640->ts_work); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2e58aa54484c..4eaadcfcb0fe 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); + +/* Walk the list of subnodes of a mdio bus and look for a node that matches the + * phy's address with its 'reg' property. If found, set the of_node pointer for + * the phy. This allows auto-probed pyh devices to be supplied with information + * passed in via DT. + */ +static void of_mdiobus_link_phydev(struct mii_bus *mdio, + struct phy_device *phydev) +{ + struct device *dev = &phydev->dev; + struct device_node *child; + + if (dev->of_node || !mdio->dev.of_node) + return; + + for_each_available_child_of_node(mdio->dev.of_node, child) { + int addr; + int ret; + + ret = of_property_read_u32(child, "reg", &addr); + if (ret < 0) { + dev_err(dev, "%s has invalid PHY address\n", + child->full_name); + continue; + } + + /* A PHY must have a reg property in the range [0-31] */ + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "%s PHY address %i is too large\n", + child->full_name, addr); + continue; + } + + if (addr == phydev->addr) { + dev->of_node = child; + return; + } + } +} +#else /* !IS_ENABLED(CONFIG_OF_MDIO) */ +static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, + struct phy_device *phydev) +{ +} #endif /** diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 91d6c1272fcf..d5b77ef3a210 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; - int len, err; + int len; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; @@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p) if (IS_ERR(code)) return PTR_ERR(code); - err = sk_chk_filter(code, uprog.len); - if (err) { - kfree(code); - return err; - } - *p = code; return uprog.len; } @@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) }; ppp_lock(ppp); - if (ppp->pass_filter) + if (ppp->pass_filter) { sk_unattached_filter_destroy(ppp->pass_filter); - err = sk_unattached_filter_create(&ppp->pass_filter, - &fprog); + ppp->pass_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&ppp->pass_filter, + &fprog); + else + err = 0; kfree(code); ppp_unlock(ppp); } @@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, 
unsigned int cmd, unsigned long arg) }; ppp_lock(ppp); - if (ppp->active_filter) + if (ppp->active_filter) { sk_unattached_filter_destroy(ppp->active_filter); - err = sk_unattached_filter_create(&ppp->active_filter, - &fprog); + ppp->active_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&ppp->active_filter, + &fprog); + else + err = 0; kfree(code); ppp_unlock(ppp); } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2ea7efd11857..6c9c16d76935 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index a3a05869309d..a4272ed62da8 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -258,10 +258,8 @@ struct hso_serial { * so as not to drop characters on the floor. */ int curr_rx_urb_idx; - u16 curr_rx_urb_offset; u8 rx_urb_filled[MAX_RX_URBS]; struct tasklet_struct unthrottle_tasklet; - struct work_struct retry_unthrottle_workqueue; }; struct hso_device { @@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty) tasklet_hi_schedule(&serial->unthrottle_tasklet); } -static void hso_unthrottle_workfunc(struct work_struct *work) -{ - struct hso_serial *serial = - container_of(work, struct hso_serial, - retry_unthrottle_workqueue); - hso_unthrottle_tasklet(serial); -} - /* open the requested serial port */ static int hso_serial_open(struct tty_struct *tty, struct file *filp) { @@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) tasklet_init(&serial->unthrottle_tasklet, (void (*)(unsigned long))hso_unthrottle_tasklet, (unsigned long)serial); - INIT_WORK(&serial->retry_unthrottle_workqueue, - hso_unthrottle_workfunc); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); @@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) if (!usb_gone) hso_stop_serial_device(serial->parent); tasklet_kill(&serial->unthrottle_tasklet); - cancel_work_sync(&serial->retry_unthrottle_workqueue); } if (!usb_gone) @@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb) static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) { struct tty_struct *tty; - int write_length_remaining = 0; - int curr_write_len; + int count; /* Sanity check */ if (urb == NULL || serial == NULL) { @@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) tty = tty_port_tty_get(&serial->port); + if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { + tty_kref_put(tty); + return -1; + } + /* Push data to tty */ - write_length_remaining = urb->actual_length - - serial->curr_rx_urb_offset; D1("data to push to tty"); - while (write_length_remaining) { - if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { - tty_kref_put(tty); - return -1; - } - curr_write_len = tty_insert_flip_string(&serial->port, - urb->transfer_buffer + serial->curr_rx_urb_offset, - write_length_remaining); - serial->curr_rx_urb_offset += curr_write_len; - write_length_remaining -= curr_write_len; + count = tty_buffer_request_room(&serial->port, urb->actual_length); + if (count >= urb->actual_length) { + 
tty_insert_flip_string(&serial->port, urb->transfer_buffer, + urb->actual_length); tty_flip_buffer_push(&serial->port); + } else { + dev_warn(&serial->parent->usb->dev, + "dropping data, %d bytes lost\n", urb->actual_length); } + tty_kref_put(tty); - if (write_length_remaining == 0) { - serial->curr_rx_urb_offset = 0; - serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; - } - return write_length_remaining; + serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; + + return 0; } @@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) } } serial->curr_rx_urb_idx = 0; - serial->curr_rx_urb_offset = 0; if (serial->tx_urb) usb_kill_urb(serial->tx_urb); diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 5d95a13dbe2a..735f7dadb9a0 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), .driver_info = (unsigned long)&huawei_cdc_ncm_info, }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16), + .driver_info = (unsigned long)&huawei_cdc_ncm_info, + }, /* Terminating entry */ { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cf62d7e8329f..22756db53dca 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -667,6 +667,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ @@ -741,6 +742,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ @@ -756,6 +758,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 25431965a625..7bad2d316637 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct sk_buff_head seg_list; struct sk_buff *segs, *nskb; - features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); if (IS_ERR(segs) || !segs) goto drop; @@ -3204,8 +3204,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, struct r8152 *tp = netdev_priv(dev); struct tally_counter tally; + if (usb_autopm_get_interface(tp->intf) < 0) + return; + generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), 
&tally, MCU_TYPE_PLA); + usb_autopm_put_interface(tp->intf); + data[0] = le64_to_cpu(tally.tx_packets); data[1] = le64_to_cpu(tally.rx_packets); data[2] = le64_to_cpu(tally.tx_errors); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 424db65e4396..d07bf4cb893f 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf) return ret; } +static int smsc95xx_reset_resume(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + int ret; + + ret = smsc95xx_reset(dev); + if (ret < 0) + return ret; + + return smsc95xx_resume(intf); +} + static void smsc95xx_rx_csum_offload(struct sk_buff *skb) { skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); @@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = { .probe = usbnet_probe, .suspend = smsc95xx_suspend, .resume = smsc95xx_resume, - .reset_resume = smsc95xx_resume, + .reset_resume = smsc95xx_reset_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, .supports_autosuspend = 1, diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 93ace042d0aa..1f041271f7fe 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2363,7 +2363,7 @@ static char *type_strings[] = { "FarSync TE1" }; -static void +static int fst_init_card(struct fst_card_info *card) { int i; @@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card) * we'll have to revise it in some way then. */ for (i = 0; i < card->nports; i++) { - err = register_hdlc_device(card->ports[i].dev); - if (err < 0) { - int j; + err = register_hdlc_device(card->ports[i].dev); + if (err < 0) { pr_err("Cannot register HDLC device for port %d (errno %d)\n", - i, -err); - for (j = i; j < card->nports; j++) { - free_netdev(card->ports[j].dev); - card->ports[j].dev = NULL; - } - card->nports = i; - break; - } + i, -err); + while (i--) + unregister_hdlc_device(card->ports[i].dev); + return err; + } } pr_info("%s-%s: %s IRQ%d, %d ports\n", port_to_dev(&card->ports[0])->name, port_to_dev(&card->ports[card->nports - 1])->name, type_strings[card->type], card->irq, card->nports); + return 0; } static const struct net_device_ops fst_ops = { @@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { pr_err("Failed to enable card. Err %d\n", -err); - kfree(card); - return err; + goto enable_fail; } if ((err = pci_request_regions(pdev, "FarSync")) !=0) { pr_err("Failed to allocate regions. 
Err %d\n", -err); - pci_disable_device(pdev); - kfree(card); - return err; + goto regions_fail; } /* Get virtual addresses of memory regions */ @@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) card->phys_ctlmem = pci_resource_start(pdev, 3); if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { pr_err("Physical memory remap failed\n"); - pci_release_regions(pdev); - pci_disable_device(pdev); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto ioremap_physmem_fail; } if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { pr_err("Control memory remap failed\n"); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto ioremap_ctlmem_fail; } dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); /* Register the interrupt handler */ if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { pr_err("Unable to register interrupt %d\n", card->irq); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto irq_fail; } /* Record info we need */ @@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) while (i--) free_netdev(card->ports[i].dev); pr_err("FarSync: out of memory\n"); - free_irq(card->irq, card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENOMEM; + goto hdlcdev_fail; } card->ports[i].dev = dev; card->ports[i].card = card; @@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, card); /* Remainder of card setup */ + if (no_of_cards_added >= FST_MAX_CARDS) { + pr_err("FarSync: too many cards\n"); + err = -ENOMEM; + goto card_array_fail; + } fst_card_array[no_of_cards_added] = card; card->card_no = no_of_cards_added++; /* Record instance and bump it */ - fst_init_card(card); + err = fst_init_card(card); + if (err) + goto init_card_fail; if (card->family == FST_FAMILY_TXU) { /* * Allocate a dma buffer for transmit and receives @@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) &card->rx_dma_handle_card); if (card->rx_dma_handle_host == NULL) { pr_err("Could not allocate rx dma buffer\n"); - fst_disable_intr(card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENOMEM; + err = -ENOMEM; + goto rx_dma_fail; } card->tx_dma_handle_host = pci_alloc_consistent(card->device, FST_MAX_MTU, &card->tx_dma_handle_card); if (card->tx_dma_handle_host == NULL) { pr_err("Could not allocate tx dma buffer\n"); - fst_disable_intr(card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENOMEM; + err = -ENOMEM; + goto tx_dma_fail; } } return 0; /* Success */ + +tx_dma_fail: + pci_free_consistent(card->device, FST_MAX_MTU, + card->rx_dma_handle_host, + card->rx_dma_handle_card); +rx_dma_fail: + fst_disable_intr(card); + for (i = 0 ; i < card->nports ; i++) + unregister_hdlc_device(card->ports[i].dev); +init_card_fail: + fst_card_array[card->card_no] = NULL; +card_array_fail: + for (i = 0 ; i < card->nports ; i++) + free_netdev(card->ports[i].dev); +hdlcdev_fail: + free_irq(card->irq, card); +irq_fail: + iounmap(card->ctlmem); +ioremap_ctlmem_fail: + 
iounmap(card->mem); +ioremap_physmem_fail: + pci_release_regions(pdev); +regions_fail: + pci_disable_device(pdev); +enable_fail: + kfree(card); + return err; } /* diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 5895f1978691..fa9fdfa128c1 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) { struct x25_asy *sl = netdev_priv(dev); unsigned char *xbuff, *rbuff; - int len = 2 * newmtu; + int len; + if (newmtu > 65534) + return -EINVAL; + + len = 2 * newmtu; xbuff = kmalloc(len + 4, GFP_ATOMIC); rbuff = kmalloc(len + 4, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 82017f56e661..e6c56c5bb0f6 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar) if (status) goto err_htc_stop; - ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; + else + ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + INIT_LIST_HEAD(&ar->arvifs); if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6c102b1312ff..eebc860c3655 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, int msdu_len, msdu_chaining = 0; struct sk_buff *msdu; struct htt_rx_desc *rx_desc; - bool corrupted = false; lockdep_assert_held(&htt->rx_ring.lock); @@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & RX_MSDU_END_INFO0_LAST_MSDU; - if (msdu_chaining && !last_msdu) - corrupted = true; - if (last_msdu) { msdu->next = NULL; break; @@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_chaining = -1; /* - * Apparently FW sometimes reports weird chained MSDU sequences with - * more than one rx descriptor. This seems like a bug but needs more - * analyzing. For the time being fix it by dropping such sequences to - * avoid blowing up the host system. - */ - if (corrupted) { - ath10k_warn("failed to pop chained msdus, dropping\n"); - ath10k_htt_rx_free_msdu_chain(*head_msdu); - *head_msdu = NULL; - *tail_msdu = NULL; - msdu_chaining = -EINVAL; - } - - /* * Don't refill the ring yet. 
* * First, the elements popped here are still in use - it is not diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 6db51a666f61..d06fcb05adf2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->bus_priv.usb = bus_pub; dev_set_drvdata(dev, bus); bus->ops = &brcmf_usb_bus_ops; - bus->chip = bus_pub->devid; - bus->chiprev = bus_pub->chiprev; bus->proto_type = BRCMF_PROTO_BCDC; bus->always_use_fws_queue = true; @@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; } + bus->chip = bus_pub->devid; + bus->chiprev = bus_pub->chiprev; + /* request firmware here */ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, brcmf_usb_probe_phase2); diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index ed50de6362ed..6dc5dd3ced44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* recalculate basic rates */ iwl_calc_basic_rates(priv, ctx); - /* - * force CTS-to-self frames protection if RTS-CTS is not preferred - * one aggregation protection method - */ - if (!priv->hw_params.use_rts_for_aggregation) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); if (vif->type == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 0aa7c0085c9f..b1a33322b9ba 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -88,6 +88,7 @@ * P2P client interfaces simultaneously if they are in different bindings. * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and * P2P client interfaces simultaneously if they are in same bindings. + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. 
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 8b5302777632..725ba49576bf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - if (vif->bss_conf.use_cts_prot) { + if (vif->bss_conf.use_cts_prot) cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); - } + IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot, vif->bss_conf.ht_operation_mode); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7215f5980186..9bfb90680cdc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -303,6 +303,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; } + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT && + !iwlwifi_mod_params.uapsd_disable) { + hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; + hw->uapsd_queues = IWL_UAPSD_AC_INFO; + hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + } + hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); @@ -1159,8 +1166,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, bcast_mac = &cmd->macs[mvmvif->id]; - /* enable filtering only for associated stations */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) + /* + * enable filtering only for associated stations, but not for P2P + * Clients + */ + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || + !vif->bss_conf.assoc) return; bcast_mac->default_discard = 1; @@ -1237,10 +1248,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) return 0; - /* bcast filtering isn't supported for P2P client */ - if (vif->p2p) - return 0; - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) return 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 4b6c7d4bd199..eac2b424f6a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm, struct iwl_scan_offload_cmd *scan, struct iwl_mvm_scan_params *params) { - scan->channel_count = - mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + scan->channel_count = req->n_channels; scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; @@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req, struct iwl_scan_channel_cfg *channels, enum ieee80211_band band, - int *head, int *tail, + int *head, u32 ssid_bitmap, struct iwl_mvm_scan_params *params) { - struct ieee80211_supported_band *s_band; - int n_channels = req->n_channels; - int i, j, index = 0; - bool partial; + int i, index = 0; - /* - * We have to configure all supported channels, even if we don't want to - * scan on them, but we have to send channels 
in the order that we want - * to scan. So add requested channels to head of the list and others to - * the end. - */ - s_band = &mvm->nvm_data->bands[band]; - - for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { - partial = false; - for (j = 0; j < n_channels; j++) - if (s_band->channels[i].center_freq == - req->channels[j]->center_freq) { - index = *head; - (*head)++; - /* - * Channels that came with the request will be - * in partial scan . - */ - partial = true; - break; - } - if (!partial) { - index = *tail; - (*tail)--; - } - channels->channel_number[index] = - cpu_to_le16(ieee80211_frequency_to_channel( - s_band->channels[i].center_freq)); + for (i = 0; i < req->n_channels; i++) { + struct ieee80211_channel *chan = req->channels[i]; + + if (chan->band != band) + continue; + + index = *head; + (*head)++; + + channels->channel_number[index] = cpu_to_le16(chan->hw_value); channels->dwell_time[index][0] = params->dwell[band].active; channels->dwell_time[index][1] = params->dwell[band].passive; channels->iter_count[index] = cpu_to_le16(1); channels->iter_interval[index] = 0; - if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR)) + if (!(chan->flags & IEEE80211_CHAN_NO_IR)) channels->type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); channels->type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL); - if (partial) - channels->type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL); + cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL | + IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL); - if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40) + if (chan->flags & IEEE80211_CHAN_NO_HT40) channels->type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); @@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; int head = 0; - int tail = band_2ghz + band_5ghz - 1; u32 ssid_bitmap; int cmd_len; int ret; @@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, &scan_cfg->scan_cmd.tx_cmd[0], scan_cfg->data); iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, - IEEE80211_BAND_2GHZ, &head, &tail, + IEEE80211_BAND_2GHZ, &head, ssid_bitmap, ¶ms); } if (band_5ghz) { @@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, scan_cfg->data + SCAN_OFFLOAD_PROBE_REQ_SIZE); iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, - IEEE80211_BAND_5GHZ, &head, &tail, + IEEE80211_BAND_5GHZ, &head, ssid_bitmap, ¶ms); } diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 7091a18d5a72..98950e45c7b0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, @@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 
0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 5b32106182f8..fe0f66f73507 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); + memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); tx_info_aggr->bss_type = tx_info_src->bss_type; tx_info_aggr->bss_num = tx_info_src->bss_num; diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e95dec91a561..b511613bba2d 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = pkt_len; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 8dee6c86f4f1..c161141f6c39 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) if (skb) { rx_info = MWIFIEX_SKB_RXCB(skb); + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index cbabc12fbda3..e91cd0fa5ca8 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = skb->len; diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index 5fce7e78a36e..70eb863c7249 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) return -1; tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index e73034fbbde9..0e88364e0c67 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; @@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer, skb->priority = MWIFIEX_PRIO_VI; tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 37f26afd4314..fd7e5b9b4581 100644 --- 
a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, return -1; } + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 9a56bc61cb1d..b0601b91cc4f 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index e11dab2216c6..832006b5aab1 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer) */ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) { - __le32 reg; + __le32 *reg; u32 fw_mode; + reg = kmalloc(sizeof(*reg), GFP_KERNEL); + if (reg == NULL) + return -ENOMEM; /* cannot use rt2x00usb_register_read here as it uses different * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the * magic value USB_MODE_AUTORUN (0x11) to the device, thus the @@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) */ rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, - ®, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE); - fw_mode = le32_to_cpu(reg); + reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE); + fw_mode = le32_to_cpu(*reg); + kfree(reg); if ((fw_mode & 0x00000003) == 2) return 1; @@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, int status; u32 offset; u32 length; + int retval; /* * Check which section of the firmware we need. @@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, /* * Write firmware to device. 
*/ - if (rt2800usb_autorun_detect(rt2x00dev)) { + retval = rt2800usb_autorun_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) { rt2x00_info(rt2x00dev, "Firmware loading not required - NIC in AutoRun mode\n"); } else { @@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, */ static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev) { - if (rt2800usb_autorun_detect(rt2x00dev)) + int retval; + + retval = rt2800usb_autorun_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) return 1; return rt2800_efuse_detect(rt2x00dev); } @@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; - if (rt2800usb_efuse_detect(rt2x00dev)) + retval = rt2800usb_efuse_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) retval = rt2800_read_eeprom_efuse(rt2x00dev); else retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1844a47636b6..c65b636bcab9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, { struct gnttab_map_grant_ref *gop_map = *gopp_map; u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + /* This always points to the shinfo of the skb being checked, which + * could be either the first or the one on the frag_list + */ struct skb_shared_info *shinfo = skb_shinfo(skb); + /* If this is non-NULL, we are currently checking the frag_list skb, and + * this points to the shinfo of the first one + */ + struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; + const bool sharedslot = nr_frags && + frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; int i, err; - struct sk_buff *first_skb = NULL; /* Check status of header. */ err = (*gopp_copy)->status; - (*gopp_copy)++; if (unlikely(err)) { if (net_ratelimit()) netdev_dbg(queue->vif->dev, @@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, (*gopp_copy)->status, pending_idx, (*gopp_copy)->source.u.ref); - xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); + /* The first frag might still have this slot mapped */ + if (!sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); } + (*gopp_copy)++; check_frags: for (i = 0; i < nr_frags; i++, gop_map++) { @@ -1062,8 +1073,19 @@ check_frags: pending_idx, gop_map->handle); /* Had a previous error? Invalidate this fragment. */ - if (unlikely(err)) + if (unlikely(err)) { xenvif_idx_unmap(queue, pending_idx); + /* If the mapping of the first frag was OK, but + * the header's copy failed, and they are + * sharing a slot, send an error + */ + if (i == 0 && sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + else + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } continue; } @@ -1075,42 +1097,53 @@ check_frags: gop_map->status, pending_idx, gop_map->ref); + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) continue; - /* First error: invalidate preceding fragments. */ + + /* First error: if the header hasn't shared a slot with the + * first frag, release it as well. + */ + if (!sharedslot) + xenvif_idx_release(queue, + XENVIF_TX_CB(skb)->pending_idx, + XEN_NETIF_RSP_OKAY); + + /* Invalidate preceding fragments of this skb.
*/ for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } + + /* And if we found the error while checking the frag_list, unmap + * the first skb's frags + */ + if (first_shinfo) { + for (j = 0; j < first_shinfo->nr_frags; j++) { + pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } } /* Remember the error: invalidate all subsequent fragments. */ err = newerr; } - if (skb_has_frag_list(skb)) { - first_skb = skb; - skb = shinfo->frag_list; - shinfo = skb_shinfo(skb); + if (skb_has_frag_list(skb) && !first_shinfo) { + first_shinfo = skb_shinfo(skb); + shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); nr_frags = shinfo->nr_frags; goto check_frags; } - /* There was a mapping error in the frag_list skb. We have to unmap - * the first skb's frags - */ - if (first_skb && err) { - int j; - shinfo = skb_shinfo(first_skb); - for (j = 0; j < shinfo->nr_frags; j++) { - pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xenvif_idx_unmap(queue, pending_idx); - } - } - *gopp_map = gop_map; return err; } @@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) /* Check the remap error code. */ if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { + /* If there was an error, xenvif_tx_check_gop is + * expected to release all the frags which were mapped, + * so kfree_skb shouldn't do it again + */ skb_shinfo(skb)->nr_frags = 0; + if (skb_has_frag_list(skb)) { + struct sk_buff *nskb = + skb_shinfo(skb)->frag_list; + skb_shinfo(nskb)->nr_frags = 0; + } kfree_skb(skb); continue; } @@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) tx_unmap_op.status); BUG(); } - - xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY); } static inline int rx_work_todo(struct xenvif_queue *queue) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 2ccb4a02368b..055222bae6e4 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info) unsigned int i = 0; unsigned int num_queues = info->netdev->real_num_tx_queues; + netif_carrier_off(info->netdev); + for (i = 0; i < num_queues; ++i) { struct netfront_queue *queue = &info->queues[i]; - /* Stop old i/f to prevent errors whilst we rebuild the state. */ - spin_lock_bh(&queue->rx_lock); - spin_lock_irq(&queue->tx_lock); - netif_carrier_off(queue->info->netdev); - spin_unlock_irq(&queue->tx_lock); - spin_unlock_bh(&queue->rx_lock); - if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) unbind_from_irqhandler(queue->tx_irq, queue); if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { @@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_irq = queue->rx_irq = 0; + napi_synchronize(&queue->napi); + /* End access and free the pages */ xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring); @@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev) /* By now, the queue structures have been set up */ for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; - spin_lock_bh(&queue->rx_lock); - spin_lock_irq(&queue->tx_lock); /* Step 1: Discard all pending TX packet fragments. 
*/ + spin_lock_irq(&queue->tx_lock); xennet_release_tx_bufs(queue); + spin_unlock_irq(&queue->tx_lock); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ + spin_lock_bh(&queue->rx_lock); + for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { skb_frag_t *frag; const struct page *page; @@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev) } queue->rx.req_prod_pvt = requeue_idx; + + spin_unlock_bh(&queue->rx_lock); } /* @@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev) netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; + notify_remote_via_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) notify_remote_via_irq(queue->rx_irq); - xennet_tx_buf_gc(queue); - xennet_alloc_rx_buffers(queue); + spin_lock_irq(&queue->tx_lock); + xennet_tx_buf_gc(queue); spin_unlock_irq(&queue->tx_lock); + + spin_lock_bh(&queue->rx_lock); + xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index c4cddf0cd96d..b777d8f46bd5 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -880,6 +880,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) const u64 phys_offset = __pa(PAGE_OFFSET); base &= PAGE_MASK; size &= PAGE_MASK; + + if (sizeof(phys_addr_t) < sizeof(u64)) { + if (base > ULONG_MAX) { + pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size > ULONG_MAX) { + pr_warning("Ignoring memory range 0x%lx - 0x%llx\n", + ULONG_MAX, base + size); + size = ULONG_MAX - base; + } + } + if (base + size < phys_offset) { pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", base, base + size); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a3bf2122a8d5..401b2453da45 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) } EXPORT_SYMBOL(of_mdiobus_register); -/** - * of_mdiobus_link_phydev - Find a device node for a phy - * @mdio: pointer to mii_bus structure - * @phydev: phydev for which the of_node pointer should be set - * - * Walk the list of subnodes of a mdio bus and look for a node that matches the - * phy's address with its 'reg' property. If found, set the of_node pointer for - * the phy. This allows auto-probed pyh devices to be supplied with information - * passed in via DT. - */ -void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) -{ - struct device *dev = &phydev->dev; - struct device_node *child; - - if (dev->of_node || !mdio->dev.of_node) - return; - - for_each_available_child_of_node(mdio->dev.of_node, child) { - int addr; - - addr = of_mdio_parse_addr(&mdio->dev, child); - if (addr < 0) - continue; - - if (addr == phydev->addr) { - dev->of_node = child; - return; - } - } -} -EXPORT_SYMBOL(of_mdiobus_link_phydev); - /* Helper function for of_phy_find_device */ static int of_phy_match(struct device *dev, void *phy_np) { diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 2872ece81f35..44333bd8f908 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -5,6 +5,12 @@ # Parport configuration. # +config ARCH_MIGHT_HAVE_PC_PARPORT + bool + help + Select this config option from the architecture Kconfig if + the architecture might have PC parallel port hardware. + menuconfig PARPORT tristate "Parallel port support" depends on HAS_IOMEM @@ -31,12 +37,6 @@ menuconfig PARPORT If unsure, say Y. 
-config ARCH_MIGHT_HAVE_PC_PARPORT - bool - help - Select this config option from the architecture Kconfig if - the architecture might have PC parallel port hardware. - if PARPORT config PARPORT_PC diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 63a54a340863..1c8592b0e146 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe) if (probe) return 0; - /* Wait for Transaction Pending bit clean */ - if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) + /* + * Wait for Transaction Pending bit to clear. A word-aligned test + * is used, so we use the control offset rather than status and shift + * the test bit to match. + */ + if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, + PCI_AF_STATUS_TP << 8)) goto clear; dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 16a2f067c242..64b98d242ea6 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA config PHY_SUN4I_USB tristate "Allwinner sunxi SoC USB PHY driver" depends on ARCH_SUNXI && HAS_IOMEM && OF + depends on RESET_CONTROLLER select GENERIC_PHY help Enable this to support the transceiver that is part of Allwinner @@ -122,6 +123,7 @@ config PHY_SUN4I_USB config PHY_SAMSUNG_USB2 tristate "Samsung USB 2.0 PHY driver" + depends on HAS_IOMEM select GENERIC_PHY select MFD_SYSCON help diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index c64a2f3b2d62..49c446530101 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, return phy; put_dev: - put_device(&phy->dev); - ida_remove(&phy_ida, phy->id); + put_device(&phy->dev); /* calls phy_release() which frees resources */ + return ERR_PTR(ret); + free_phy: kfree(phy); return ERR_PTR(ret); @@ -799,7 +800,7 @@ static void phy_release(struct device *dev) phy = to_phy(dev); dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); - ida_remove(&phy_ida, phy->id); + ida_simple_remove(&phy_ida, phy->id); kfree(phy); } diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 7007c11fe07d..34b396146c8a 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev) if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); phy->phy_base = devm_ioremap_resource(&pdev->dev, res); - if (!phy->phy_base) - return -ENOMEM; + if (IS_ERR(phy->phy_base)) + return PTR_ERR(phy->phy_base); phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT; } @@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev) otg->phy = &phy->phy; platform_set_drvdata(pdev, phy); - pm_runtime_enable(phy->dev); generic_phy = devm_phy_create(phy->dev, &ops, NULL); if (IS_ERR(generic_phy)) @@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev) phy_set_drvdata(generic_phy, phy); + pm_runtime_enable(phy->dev); phy_provider = devm_of_phy_provider_register(phy->dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) + if (IS_ERR(phy_provider)) { + pm_runtime_disable(phy->dev); return PTR_ERR(phy_provider); + } phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); if (IS_ERR(phy->wkupclk)) { @@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev) if (!IS_ERR(phy->optclk))
clk_unprepare(phy->optclk); usb_remove_phy(&phy->phy); + pm_runtime_disable(phy->dev); return 0; } diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c index 8a8c6bc8709a..1e69a32c221d 100644 --- a/drivers/phy/phy-samsung-usb2.c +++ b/drivers/phy/phy-samsung-usb2.c @@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = { #endif { }, }; +MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match); static int samsung_usb2_phy_probe(struct platform_device *pdev) { diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c index edf5d2fd2b22..86db2235ab00 100644 --- a/drivers/pinctrl/berlin/berlin.c +++ b/drivers/pinctrl/berlin/berlin.c @@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev, regmap = dev_get_regmap(&pdev->dev, NULL); if (!regmap) - return PTR_ERR(regmap); + return -ENODEV; pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 1bd6363bc95e..9f43916637ca 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) status = readl(info->irqmux_base); - for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) + for_each_set_bit(n, &status, info->nbanks) __gpio_irq_handler(&info->banks[n]); chained_irq_exit(chip, desc); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index f1ca75e6d7b1..5f38c7f67834 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, configlen++; pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); + if (!pinconfig) { + kfree(*map); + return -ENOMEM; + } if (!of_property_read_u32(node, "allwinner,drive", &val)) { u16 strength = (val + 1) * 10; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 15b3459f8656..220acb4cbee5 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data) } else raw3270_writesf_readpart(rp); memset(&rp->init_reset, 0, sizeof(rp->init_reset)); - memset(&rp->init_data, 0, sizeof(rp->init_data)); } static int diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 69ef4f8cfac8..4038437ff033 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev) int rc; ap_dev->drv = ap_drv; + + spin_lock_bh(&ap_device_list_lock); + list_add(&ap_dev->list, &ap_device_list); + spin_unlock_bh(&ap_device_list_lock); + rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; - if (!rc) { + if (rc) { spin_lock_bh(&ap_device_list_lock); - list_add(&ap_dev->list, &ap_device_list); + list_del_init(&ap_dev->list); spin_unlock_bh(&ap_device_list_lock); } return rc; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 554349029628..56467df3d6de 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4198,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) kfree(phba->ep_array); phba->ep_array = NULL; ret = -ENOMEM; + + goto free_memory; } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 6045aa78986a..07934b0b9ee1 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1008,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba, BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); - if (rc) { - kfree(if_info); + if (rc) return rc; - } if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (if_info->dhcp_state) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f54843023466..785d0d71781e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { - put_cpu(); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { - put_cpu(); kfree_skb(skb); return; } @@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) port = lport_priv(vn_port); if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); - put_cpu(); kfree_skb(skb); return; } @@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We don't do this in the L2 path */ - put_cpu(); kfree_skb(skb); return; } @@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ - put_cpu(); kfree_skb(skb); return; } @@ -572,22 +563,23 @@ if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ - put_cpu(); kfree_skb(skb); return; } + stats = per_cpu_ptr(lport->stats, smp_processor_id()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); stats->InvalidCRCCount++; - put_cpu(); kfree_skb(skb); return; } - put_cpu(); fc_exch_recv(lport, fp); } diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 32a5e0a2a669..7bc47fc7c686 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) arr_sz, GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; goto mem_err; } diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 2ebfb2bb0f42..7b23f21f22f1 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. + */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index c4f31b21feb8..e90c89f1d480 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * pm8001_get_phy_settings_info : Read phy setting values. * @pm8001_ha : our hba.
*/ -void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) { #ifdef PM8001_READ_VPD @@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) payload.offset = 0; payload.length = 4096; payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; /* Read phy setting values from flash */ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); #endif + return 0; } #ifdef PM8001_USE_MSIX @@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm8001_init_sas_add(pm8001_ha); /* phy setting support for motherboard controller */ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && - pdev->subsystem_vendor != 0) - pm8001_get_phy_settings_info(pm8001_ha); + pdev->subsystem_vendor != 0) { + rc = pm8001_get_phy_settings_info(pm8001_ha); + if (rc) + goto err_out_shost; + } pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 4b188b0164e9..e632e14180cf 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1128,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ctio->u.status1.flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); qla2x00_start_iocbs(vha, vha->req); @@ -1262,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, { struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; + uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", @@ -1292,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); - ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); ctio->u.status1.response_len = __constant_cpu_to_le16(8); @@ -1513,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, struct ctio7_to_24xx *pkt; struct qla_hw_data *ha = vha->hw; struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t temp; pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; prm->pkt = pkt; @@ -1541,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags |= (atio->u.isp24.attr << 9); - pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); ql_dbg(ql_dbg_tgt, vha, 0xe00c, "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", - vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, - le16_to_cpu(pkt->u.status0.ox_id)); + vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp); return 0; } @@ -2619,6 +2622,7 @@ static int __qlt_send_term_exchange(struct 
scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; + uint16_t temp; ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); @@ -2655,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.ox_id = cpu_to_le16(temp); /* Most likely, it isn't needed */ ctio24->u.status1.residual = get_unaligned((uint32_t *) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index e0a58fd13f66..d1d24fb0160a 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -443,7 +443,7 @@ struct ctio7_to_24xx { uint16_t reserved1; __le16 flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint32_t relative_offset; uint32_t reserved2; @@ -458,7 +458,7 @@ struct ctio7_to_24xx { uint16_t sense_length; uint16_t flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint16_t response_len; uint16_t reserved; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index cbe38e5e7955..7e957918f33f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work) "aborting command %p\n", scmd)); rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); if (rtn == SUCCESS) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); if (scsi_host_eh_past_deadline(sdev->host)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, @@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work) scmd_printk(KERN_WARNING, scmd, "scmd %p terminate " "aborted command\n", scmd)); - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); scsi_finish_command(scmd); } } @@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) else if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); - if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort) - if (scsi_abort_command(scmd) == SUCCESS) + if (rtn == BLK_EH_NOT_HANDLED) { + if (!host->hostt->no_async_abort && + scsi_abort_command(scmd) == SUCCESS) return BLK_EH_NOT_HANDLED; - scmd->result |= DID_TIME_OUT << 16; - - if (unlikely(rtn == BLK_EH_NOT_HANDLED && - !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) - rtn = BLK_EH_HANDLED; + set_host_byte(scmd, DID_TIME_OUT); + if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) + rtn = BLK_EH_HANDLED; + } return rtn; } @@ -1777,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) break; case DID_ABORT: if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } case DID_NO_CONNECT: diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index f80908f74ca9..521f5838594b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); + cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e9689d57ccb6..6825eda1114a 
100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2441,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; - if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 89ee5929eb6d..308256b5e4cb 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; @@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { @@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -589,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. 
+ */ + virtscsi_poll_requests(vscsi); + out: mempool_free(cmd, virtscsi_cmd_pool); return ret; diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c index 357cef2a6f4c..7194bd138762 100644 --- a/drivers/staging/iio/adc/ad7291.c +++ b/drivers/staging/iio/adc/ad7291.c @@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client, struct ad7291_platform_data *pdata = client->dev.platform_data; struct ad7291_chip_info *chip; struct iio_dev *indio_dev; - int ret = 0; + int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (!indio_dev) @@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client, if (pdata && pdata->use_external_ref) { chip->reg = devm_regulator_get(&client->dev, "vref"); if (IS_ERR(chip->reg)) - return ret; + return PTR_ERR(chip->reg); ret = regulator_enable(chip->reg); if (ret) diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig index 78b0fba7047e..8afc6fee40c5 100644 --- a/drivers/staging/media/omap4iss/Kconfig +++ b/drivers/staging/media/omap4iss/Kconfig @@ -1,6 +1,6 @@ config VIDEO_OMAP4 bool "OMAP 4 Camera support" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 + depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 select VIDEOBUF2_DMA_CONTIG ---help--- Driver for an OMAP 4 ISS controller. diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index 8945b4e3a2a6..cb50120ed7b5 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c @@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); /* Wait until the state has moved to ON */ - while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)& - OMAP_INTRANSITION_MASK); + while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, + OMAP2_PM_PWSTST) & + OMAP_INTRANSITION_MASK) + ; /* Disable Automatic transition */ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index a99c63152b8d..2c516f2eebed 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev) { struct imx_thermal_data *data = platform_get_drvdata(pdev); struct regmap *map; - int t1, t2, n1, n2; + int t1, n1; int ret; u32 val; u64 temp64; @@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev) /* * Sensor data layout: * [31:20] - sensor value @ 25C - * [19:8] - sensor value of hot - * [7:0] - hot temperature value * Use universal formula now and only need sensor value @ 25C * slope = 0.4297157 - (0.0015976 * 25C fuse) */ n1 = val >> 20; - n2 = (val & 0xfff00) >> 8; - t2 = val & 0xff; t1 = 25; /* t1 always 25C */ /* @@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev) data->c2 = n1 * data->c1 + 1000 * t1; /* - * Set the default passive cooling trip point to 20 °C below the - * maximum die temperature. Can be changed from userspace. + * Set the default passive cooling trip point, + * can be changed from userspace. */ - data->temp_passive = 1000 * (t2 - 20); + data->temp_passive = IMX_TEMP_PASSIVE; /* - * The maximum die temperature is t2, let's give 5 °C cushion - * for noise and possible temperature rise between measurements. 
+ * The maximum die temperature set to 20 C higher than + * IMX_TEMP_PASSIVE. */ - data->temp_critical = 1000 * (t2 - 5); + data->temp_critical = 1000 * 20 + data->temp_passive; return 0; } diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 04b1be7fa018..4b2b999b7611 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal, ret = thermal_zone_bind_cooling_device(thermal, tbp->trip_id, cdev, - tbp->min, - tbp->max); + tbp->max, + tbp->min); if (ret) return ret; } @@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np) } i = 0; - for_each_child_of_node(child, gchild) + for_each_child_of_node(child, gchild) { ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++], tz->trips, tz->ntrips); if (ret) goto free_tbps; + } finish: of_node_put(child); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index fdb07199d9c2..1967bee4f076 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, return NULL; } +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) +{ + unsigned long temp; + return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); +} + int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; @@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) if (result) goto free_temp_mem; - if (tz->ops->get_crit_temp) { - unsigned long temperature; - if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, - sizeof(temp->temp_crit.name), + if (thermal_zone_crit_temp_valid(tz)) { + snprintf(temp->temp_crit.name, + sizeof(temp->temp_crit.name), "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); - result = device_create_file(hwmon->device, - &temp->temp_crit.attr); - if (result) - goto unregister_input; - } + temp->temp_crit.attr.attr.name = temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); + result = device_create_file(hwmon->device, + &temp->temp_crit.attr); + if (result) + goto unregister_input; } mutex_lock(&thermal_hwmon_list_lock); @@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) } device_remove_file(hwmon->device, &temp->temp_input.attr); - if (tz->ops->get_crit_temp) + if (thermal_zone_crit_temp_valid(tz)) device_remove_file(hwmon->device, &temp->temp_crit.attr); mutex_lock(&thermal_hwmon_list_lock); diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index a1271b55103a..634b6ce0e63a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) /* register shadow for context save and restore */ bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * bgp->conf->sensor_count, GFP_KERNEL); - if (!bgp) { + if (!bgp->regval) { dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); return ERR_PTR(-ENOMEM); } diff --git a/drivers/tty/serial/arc_uart.c 
b/drivers/tty/serial/arc_uart.c index c9f5c9dcc15c..008c223eaf26 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart) uart->port.icount.tx++; uart->port.x_char = 0; sent = 1; - } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */ + } else if (!uart_circ_empty(xmit)) { ch = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); uart->port.icount.tx++; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index e2f93874989b..044e86d528ae 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port) struct imx_port *sport = (struct imx_port *)port; unsigned long temp; + if (uart_circ_empty(&port->state->xmit)) + return; + if (USE_IRDA(sport)) { /* half duplex in IrDA mode; have to disable receive mode */ temp = readl(sport->port.membase + UCR4); diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c index 1efd4c36ba0c..99b7b8697861 100644 --- a/drivers/tty/serial/ip22zilog.c +++ b/drivers/tty/serial/ip22zilog.c @@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port) } else { struct circ_buf *xmit = &port->state->xmit; + if (uart_circ_empty(xmit)) + return; writeb(xmit->buf[xmit->tail], &channel->data); ZSDELAY(); ZS_WSYNC(channel); diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c index 68f2c53e0b54..5702828fb62e 100644 --- a/drivers/tty/serial/m32r_sio.c +++ b/drivers/tty/serial/m32r_sio.c @@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port) if (!(up->ier & UART_IER_THRI)) { up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); - serial_out(up, UART_TX, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + if (!uart_circ_empty(xmit)) { + serial_out(up, UART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + } } while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY); #else diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 8193635103ee..f7ad5b903055 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port) } else { struct circ_buf *xmit = &port->state->xmit; + if (uart_circ_empty(xmit)) + goto out; write_zsdata(uap, xmit->buf[xmit->tail]); zssync(uap); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); @@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); } + out: pmz_debug("pmz: start_tx() done.\n"); } diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 80a58eca785b..2f57df9a71d9 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port) struct circ_buf *xmit = &up->port.state->xmit; int i; + if (uart_circ_empty(xmit)) + return; + up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index a85db8b87156..02df3940b95e 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port) } else { struct circ_buf *xmit 
= &port->state->xmit; + if (uart_circ_empty(xmit)) + return; writeb(xmit->buf[xmit->tail], &channel->data); ZSDELAY(); ZS_WSYNC(channel); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 69425b3cb6b7..b8125aa64ad8 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep, if (hwep->type == USB_ENDPOINT_XFER_CONTROL) cap |= QH_IOS; - if (hwep->num) - cap |= QH_ZLT; + + cap |= QH_ZLT; cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; /* * For ISO-TX, we set mult at QH as the largest value, and use @@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); unsigned long flags; + struct td_node *node, *tmpnode; if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || hwep->ep.desc == NULL || list_empty(&hwreq->queue) || @@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { + dma_pool_free(hwep->td_pool, node->ptr, node->dma); + list_del(&node->td); + kfree(node); + } + /* pop request */ list_del_init(&hwreq->queue); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 21b99b4b4082..0e950ad8cb25 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1) if (!hub_is_superspeed(hub->hdev)) return -EINVAL; + ret = hub_port_status(hub, port1, &portstatus, &portchange); + if (ret < 0) + return ret; + + /* + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI + * Controller [1022:7814] will have spurious result making the following + * usb 3.0 device hotplugging route to the 2.0 root hub and recognized + * as high-speed device if we set the usb 3.0 port link state to + * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we + * check the state here to avoid the bug. 
+ */ + if ((portstatus & USB_PORT_STAT_LINK_STATE) == + USB_SS_PORT_LS_RX_DETECT) { + dev_dbg(&hub->ports[port1 - 1]->dev, + "Not disabling port; link state is RxDetect\n"); + return ret; + } + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); if (ret) return ret; diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 8eb996e4f058..261c3b428220 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -45,6 +45,7 @@ comment "Platform Glue Driver Support" config USB_DWC3_OMAP tristate "Texas Instruments OMAP5 and similar Platforms" depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST) + depends on OF default USB_DWC3 help Some platforms from Texas Instruments like OMAP5, DRA7xxx and diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 4af4c3567656..07a736acd0f2 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c) { struct platform_device *pdev = to_platform_device(dev); - platform_device_unregister(pdev); + of_device_unregister(pdev); return 0; } @@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - dwc3_omap_disable_irqs(omap); + dwc3_omap_write_irqmisc_set(omap, 0x00); return 0; } @@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev) static void dwc3_omap_complete(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); + u32 reg; - dwc3_omap_enable_irqs(omap); + reg = (USBOTGSS_IRQMISC_OEVT | + USBOTGSS_IRQMISC_DRVVBUS_RISE | + USBOTGSS_IRQMISC_CHRGVBUS_RISE | + USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | + USBOTGSS_IRQMISC_IDPULLUP_RISE | + USBOTGSS_IRQMISC_DRVVBUS_FALL | + USBOTGSS_IRQMISC_CHRGVBUS_FALL | + USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | + USBOTGSS_IRQMISC_IDPULLUP_FALL); + + dwc3_omap_write_irqmisc_set(omap, reg); } static int dwc3_omap_suspend(struct device *dev) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9d64dd02c57e..dab7927d1009 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, length, last ? " last" : "", chain ? 
" chain" : ""); - /* Skip the LINK-TRB on ISOC */ - if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && - usb_endpoint_xfer_isoc(dep->endpoint.desc)) - dep->free_slot++; trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; @@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, } dep->free_slot++; + /* Skip the LINK-TRB on ISOC */ + if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && + usb_endpoint_xfer_isoc(dep->endpoint.desc)) + dep->free_slot++; trb->size = DWC3_TRB_SIZE_LENGTH(length); trb->bpl = lower_32_bits(dma); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 2ddcd635ca2a..97142146eead 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = { .store_attribute = usb_os_desc_attr_store, }; -static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc, - char *page) +static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc, + char *page) { memcpy(page, desc->ext_compat_id, 8); return 8; } -static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, - const char *page, size_t len) +static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) { int l; @@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, return len; } -static struct usb_os_desc_attribute rndis_grp_attr_compatible_id = +static struct usb_os_desc_attribute interf_grp_attr_compatible_id = __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR, - rndis_grp_compatible_id_show, - rndis_grp_compatible_id_store); + interf_grp_compatible_id_show, + interf_grp_compatible_id_store); -static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc, - char *page) +static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc, + char *page) { memcpy(page, desc->ext_compat_id + 8, 8); return 8; } -static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, - const char *page, size_t len) +static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) { int l; @@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, return len; } -static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id = +static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id = __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR, - rndis_grp_sub_compatible_id_show, - rndis_grp_sub_compatible_id_store); + interf_grp_sub_compatible_id_show, + interf_grp_sub_compatible_id_store); static struct configfs_attribute *interf_grp_attrs[] = { - &rndis_grp_attr_compatible_id.attr, - &rndis_grp_attr_sub_compatible_id.attr, + &interf_grp_attr_compatible_id.attr, + &interf_grp_attr_sub_compatible_id.attr, NULL }; int usb_os_desc_prepare_interf_dir(struct config_group *parent, int n_interf, struct usb_os_desc **desc, + char **names, struct module *owner) { struct config_group **f_default_groups, *os_desc_group, @@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, d = desc[n_interf]; d->owner = owner; config_group_init_type_name(&d->group, "", interface_type); - config_item_set_name(&d->group.cg_item, "interface.%d", - n_interf); + config_item_set_name(&d->group.cg_item, "interface.%s", + names[n_interf]); interface_groups[n_interf] = &d->group; } diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h 
index a14ac792c698..36c468c4f5e9 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h @@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item); int usb_os_desc_prepare_interf_dir(struct config_group *parent, int n_interf, struct usb_os_desc **desc, + char **names, struct module *owner); static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 74202d67f911..8598c27c7d43 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) ffs->ep0req->context = ffs; lang = ffs->stringtabs; - for (lang = ffs->stringtabs; *lang; ++lang) { - struct usb_string *str = (*lang)->strings; - int id = first_id; - for (; str->s; ++id, ++str) - str->id = id; + if (lang) { + for (; *lang; ++lang) { + struct usb_string *str = (*lang)->strings; + int id = first_id; + for (; str->s; ++id, ++str) + str->id = id; + } } ffs->gadget = cdev->gadget; diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index eed3ad878047..9c41e9515b8e 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), GFP_KERNEL); if (!f->os_desc_table) - return PTR_ERR(f->os_desc_table); + return -ENOMEM; f->os_desc_n = 1; f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; } @@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) { struct f_rndis_opts *opts; struct usb_os_desc *descs[1]; + char *names[1]; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) @@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void) INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop); descs[0] = &opts->rndis_os_desc; + names[0] = "rndis"; usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, - THIS_MODULE); + names, THIS_MODULE); config_group_init_type_name(&opts->func_inst.group, "", &rndis_func_type); diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c index 99a37ed03e27..c7004ee89c90 100644 --- a/drivers/usb/gadget/gr_udc.c +++ b/drivers/usb/gadget/gr_udc.c @@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep, "%s mode: multiple trans./microframe not valid\n", (mode == 2 ? 
"Bulk" : "Control")); return -EINVAL; - } else if (nt == 0x11) { - dev_err(dev->dev, "Invalid value for trans./microframe\n"); + } else if (nt == 0x3) { + dev_err(dev->dev, + "Invalid value 0x3 for additional trans./microframe\n"); return -EINVAL; } else if ((nt + 1) * max > buffer_size) { dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n", diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index ee6c16416c30..2e4ce7704908 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd) kfree (dev->buf); dev->buf = NULL; - put_dev (dev); + /* other endpoints were all decoupled from this device */ + spin_lock_irq(&dev->lock); + dev->state = STATE_DEV_DISABLED; + spin_unlock_irq(&dev->lock); + + put_dev (dev); return 0; } diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 3d78a8844e43..97b027724ee7 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link) DBG(dev, "%s\n", __func__); + netif_tx_lock(dev->net); netif_stop_queue(dev->net); + netif_tx_unlock(dev->net); + netif_carrier_off(dev->net); /* disable endpoints, forcing (synchronous) completion diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 61b7817bd66b..03314f861bee 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91 config USB_EHCI_MSM tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller" - depends on ARCH_MSM + depends on ARCH_MSM || ARCH_QCOM select USB_EHCI_ROOT_HUB_TT ---help--- Enables support for the USB Host controller present on the diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 2b998c60faf2..aa79e8749040 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -22,6 +22,7 @@ #include <linux/slab.h> +#include <linux/device.h> #include <asm/unaligned.h> #include "xhci.h" @@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME * is enabled, so also enable remote wake here. */ - if (hcd->self.root_hub->do_remote_wakeup) { + if (hcd->self.root_hub->do_remote_wakeup + && device_may_wakeup(hcd->self.controller)) { + if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index d67ff71209f5..749fc68eb5c1 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_RESET_DEV: - WARN_ON(slot_id != TRB_TO_SLOT_ID( - le32_to_cpu(cmd_trb->generic.field[3]))); + /* SLOT_ID field in reset device cmd completion event TRB is 0. 
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) + */ + slot_id = TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3])); xhci_handle_cmd_reset_dev(xhci, slot_id, event); break; case TRB_NEC_GET_FW: @@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, return 0; max_burst = urb->ep->ss_ep_comp.bMaxBurst; - return roundup(total_packet_count, max_burst + 1) - 1; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; } /* diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2b8d9a24af09..7436d5f5e67a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci) */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0; + u32 command, temp = 0, status; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); + /* Resume root hubs only when have pending events. */ + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) { + usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); + } } /* diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c index d2353781bd2d..1e58ed2361cc 100644 --- a/drivers/usb/musb/musb_am335x.c +++ b/drivers/usb/musb/musb_am335x.c @@ -19,21 +19,6 @@ err: return ret; } -static int of_remove_populated_child(struct device *dev, void *d) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - return 0; -} - -static int am335x_child_remove(struct platform_device *pdev) -{ - device_for_each_child(&pdev->dev, NULL, of_remove_populated_child); - pm_runtime_disable(&pdev->dev); - return 0; -} - static const struct of_device_id am335x_child_of_match[] = { { .compatible = "ti,am33xx-usb" }, { }, @@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match); static struct platform_driver am335x_child_driver = { .probe = am335x_child_probe, - .remove = am335x_child_remove, .driver = { .name = "am335x-usb-childs", .of_match_table = am335x_child_of_match, }, }; -module_platform_driver(am335x_child_driver); +static int __init am335x_child_init(void) +{ + return platform_driver_register(&am335x_child_driver); +} +module_init(am335x_child_init); + MODULE_DESCRIPTION("AM33xx child devices"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 61da471b7aed..eff3c5cf84f4 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -849,7 +849,7 @@ b_host: } /* handle babble condition */ - if (int_usb & MUSB_INTR_BABBLE) + if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb)) schedule_work(&musb->recover_work); #if 0 diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 7b8bbf53127e..5341bb223b7c 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data) } list_add_tail(&cppi41_channel->tx_check, &controller->early_tx_list); - if (!hrtimer_active(&controller->early_tx)) { + if (!hrtimer_is_queued(&controller->early_tx)) { hrtimer_start_range_ns(&controller->early_tx, ktime_set(0, 140 * NSEC_PER_USEC), 40 * NSEC_PER_USEC, diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 51beb13c7e1a..09529f94e72d 100644 --- 
a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) struct dsps_glue *glue = dev_get_drvdata(dev->parent); const struct dsps_musb_wrapper *wrp = glue->wrp; void __iomem *ctrl_base = musb->ctrl_base; - void __iomem *base = musb->mregs; u32 reg; - reg = dsps_readl(base, wrp->mode); + reg = dsps_readl(ctrl_base, wrp->mode); switch (mode) { case MUSB_HOST: @@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) */ reg |= (1 << wrp->iddig_mux); - dsps_writel(base, wrp->mode, reg); + dsps_writel(ctrl_base, wrp->mode, reg); dsps_writel(ctrl_base, wrp->phy_utmi, 0x02); break; case MUSB_PERIPHERAL: @@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) */ reg |= (1 << wrp->iddig_mux); - dsps_writel(base, wrp->mode, reg); + dsps_writel(ctrl_base, wrp->mode, reg); break; case MUSB_OTG: - dsps_writel(base, wrp->phy_utmi, 0x02); + dsps_writel(ctrl_base, wrp->phy_utmi, 0x02); break; default: dev_err(glue->dev, "unsupported mode %d\n", mode); diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index c2e45e632723..f202e5088461 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev) musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; - musb->dev.of_node = pdev->dev.of_node; glue->dev = &pdev->dev; glue->musb = musb; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index ced34f39bdd4..c929370cdaa6 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w) motg->chg_state = USB_CHG_STATE_UNDEFINED; motg->chg_type = USB_INVALID_CHARGER; } - pm_runtime_put_sync(otg->phy->dev); + + if (otg->phy->state == OTG_STATE_B_IDLE) + pm_runtime_put_sync(otg->phy->dev); break; case OTG_STATE_B_PERIPHERAL: dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n"); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index d49f9c326035..4fd36530bfa3 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -681,6 +681,14 @@ usbhs_fifo_read_end: usbhs_pipe_number(pipe), pkt->length, pkt->actual, *is_done, pkt->zero); + /* + * Transmission end + */ + if (*is_done) { + if (usbhs_pipe_is_dcp(pipe)) + usbhs_dcp_control_transfer_done(pipe); + } + usbhs_fifo_read_busy: usbhsf_fifo_unselect(pipe, fifo); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 762e4a5f5ae9..330df5ce435b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index edf3b124583c..8a3813be1b28 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = { { 
USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, @@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, + /* Infineon Devices */ + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, { } /* Terminating entry */ }; @@ -1566,14 +1569,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; - struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; + struct usb_endpoint_descriptor *ep_desc; unsigned num_endpoints; - int i; + unsigned i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); + if (!num_endpoints) + return; + /* NOTE: some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. In this case, we * want to override the endpoint descriptor setting and use a diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 500474c48f4b..c4777bc6aee0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -584,6 +584,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Infineon Technologies + */ +#define INFINEON_VID 0x058b +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ + +/* * Acton Research Corp. */ #define ACTON_VID 0x0647 /* Vendor ID */ @@ -798,7 +804,8 @@ * Submitted by Colin Leroy */ #define TESTO_VID 0x128D -#define TESTO_USB_INTERFACE_PID 0x0001 +#define TESTO_1_PID 0x0001 +#define TESTO_3_PID 0x0003 /* * Mobility Electronics products. 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 59c3108cc136..a9688940543d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb); /* Zoom */ #define ZOOM_PRODUCT_4597 0x9607 +/* SpeedUp SU9800 usb 3g modem */ +#define SPEEDUP_PRODUCT_SU9800 0x9800 + /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 @@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb); /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD120 0xc001 +#define OLIVETTI_PRODUCT_OLICARD140 0xc002 #define OLIVETTI_PRODUCT_OLICARD145 0xc003 +#define OLIVETTI_PRODUCT_OLICARD155 0xc004 #define OLIVETTI_PRODUCT_OLICARD200 0xc005 +#define OLIVETTI_PRODUCT_OLICARD160 0xc00a #define OLIVETTI_PRODUCT_OLICARD500 0xc00b /* Celot products */ @@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, @@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, - - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CELOT_VENDOR_ID, 
CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 9d38ddc8da49..866b5df36ed1 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev) if (us->fflags & US_FL_WRITE_CACHE) sdev->wce_default_on = 1; + /* A few buggy USB-ATA bridges don't understand FUA */ + if (us->fflags & US_FL_BROKEN_FUA) + sdev->broken_fua = 1; + } else { /* Non-disk-type devices don't need to blacklist any pages diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 174a447868cd..80a5b366255f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1936,6 +1936,13 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Michael Büsch <m@bues.ch> */ +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> * JMicron responds to USN and several other SCSI ioctls with a * residue that causes subsequent I/O requests to fail. */ diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e683b6ef9594..d36e830d6fc6 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) goto put_display_node; } + INIT_LIST_HEAD(&pdata->pwr_gpios); ret = -ENOMEM; for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) { gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio", @@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) dev_err(dev, "set direction output gpio %d failed\n", gpio); goto put_display_node; } + list_add(&og->list, &pdata->pwr_gpios); } if (is_gpio_power) diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c index a54f7f7d763b..8fe41caac38e 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.c +++ b/drivers/video/fbdev/bfin_adv7393fb.c @@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client, /* Workaround "PPI Does Not Start Properly In Specific Mode" */ if (ANOMALY_05000400) { ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW, - "PPI0_FS3") + "PPI0_FS3"); if (ret) { dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); ret = -EBUSY; diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c index 99af9e88b2d8..2f0822ee3ff9 100644 --- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c +++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c @@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root) { struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), GFP_KERNEL); - n->node = node; - n->root = root; - list_add(&n->list, &dss_conv_list); + if (n) { + n->node = node; + n->root = root; + list_add(&n->list, &dss_conv_list); + } } static bool __init omapdss_list_contains(const struct device_node *node) diff --git a/drivers/video/fbdev/vt8500lcdfb.c 
b/drivers/video/fbdev/vt8500lcdfb.c index a8f2b280f796..a1134c3f6c11 100644 --- a/drivers/video/fbdev/vt8500lcdfb.c +++ b/drivers/video/fbdev/vt8500lcdfb.c @@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); - kfree(fbi); - return 0; } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b7a506f2bb14..5c660c77f03b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) * p2m are consistent. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { - unsigned long p; - struct page *scratch_page = get_balloon_scratch_page(); - if (!PageHighMem(page)) { + struct page *scratch_page = get_balloon_scratch_page(); + ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte(page_to_pfn(scratch_page), PAGE_KERNEL_RO), 0); BUG_ON(ret); - } - p = page_to_pfn(scratch_page); - __set_phys_to_machine(pfn, pfn_to_mfn(p)); - put_balloon_scratch_page(); + put_balloon_scratch_page(); + } + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } #endif diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c3667b202f2f..5f1e1f3cd186 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -88,7 +88,6 @@ static int xen_suspend(void *data) if (!si->cancelled) { xen_irq_resume(); - xen_console_resume(); xen_timer_resume(); } @@ -135,6 +134,10 @@ static void do_suspend(void) err = stop_machine(xen_suspend, &si, cpumask_of(0)); + /* Resume console as early as possible. */ + if (!si.cancelled) + xen_console_resume(); + raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
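
A note on the xhci-ring.c hunk above that swaps roundup() for DIV_ROUND_UP() in xhci_get_burst_count(): the two helpers are easy to confuse. roundup(x, y) rounds x up to the next multiple of y, whereas DIV_ROUND_UP(x, y) is a ceiling division, which is what a burst count needs. The stand-alone sketch below is illustrative only and is not part of any patch in this series; the macro definitions mirror the usual kernel helpers and the sample numbers are made up.

    /* Illustrative copies of the kernel helpers, for comparison only. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define roundup(x, y)       ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned int total_packet_count = 3; /* packets in the TD (example value) */
            unsigned int max_burst = 1;          /* bMaxBurst: up to 2 packets per burst */

            /* Old formula: rounds the packet count to a multiple of the burst size. */
            unsigned int old = roundup(total_packet_count, max_burst + 1) - 1;        /* 4 - 1 = 3 */

            /* Fixed formula: ceiling division yields the number of bursts. */
            unsigned int fixed = DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; /* 2 - 1 = 1 */

            printf("old: %u, fixed: %u\n", old, fixed);
            return 0;
    }

With 3 packets and 2 packets per burst, the old expression reports 3 while the corrected one reports 1, i.e. two bursts expressed as "number of bursts minus one".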